File: build-scan/../src/basic/hashmap.c
Warning: line 1792, column 25: Use of zero-allocated memory
/* SPDX-License-Identifier: LGPL-2.1+ */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc-util.h"
#include "hashmap.h"
#include "fileio.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "string-util.h"
#include "strv.h"
#include "util.h"

#if ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif

/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
 * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * http://www.math.uu.se/~svante/papers/sj157.pdf
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
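
/*
 * Illustrative sketch, not part of the implementation: the core Robin Hood
 * rule compares how far the entry being inserted has already probed
 * ("distance") with the incumbent entry's DIB, and swaps whenever the
 * incumbent is "richer", i.e. closer to its ideal bucket. With hypothetical
 * helper names it boils down to:
 *
 *     for (distance = 0;; distance++, idx = next_idx(h, idx)) {
 *             if (bucket_is_free(h, idx))
 *                     return place(h, idx, distance);          // done
 *             if (dib_of(h, idx) < distance) {
 *                     unsigned d = dib_of(h, idx);
 *                     swap_with_incumbent(h, idx, distance);   // our entry settles here
 *                     distance = d;                            // keep probing for the evicted one
 *             }
 *     }
 *
 * The real insertion path is hashmap_put_robin_hood() further down.
 */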

/*
 * XXX Ideas for improvement:
 * For unordered hashmaps, randomize iteration order, similarly to Perl:
 * http://blog.booking.com/hardening-perls-hash-function.html
 */

/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
 * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U

/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT         (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP         (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END   (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL   UINT_MAX       /* special index value meaning "none" or "end" */

assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
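
/* A raw DIB is stored as a single byte per bucket, with the large values above
 * reserved as markers. The full (unsigned) DIB is recovered on demand by
 * bucket_calculate_dib() below; DIB_FREE stands for "this bucket is free". */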

#if ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};

struct _packed_ indirect_storage {
        void *storage;                   /* where buckets and DIBs are stored */
        uint8_t hash_key[HASH_KEY_SIZE]; /* hash key; changes during resize */

        unsigned n_entries;              /* number of stored entries */
        unsigned n_buckets;              /* number of buckets */

        unsigned idx_lowest_entry;       /* Index below which all buckets are free.
                                            Makes "while(hashmap_steal_first())" loops
                                            O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t _pad[3];                 /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        uint8_t storage[sizeof(struct indirect_storage)];
};

#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops; /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        bool dirty:1;                /* whether dirtied since last iterated_cache_get() */
        bool cached:1;               /* whether this hashmap is being cached */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

typedef struct CacheMem {
        const void **ptr;
        size_t n_populated, n_allocated;
        bool active:1;
} CacheMem;

struct IteratedCache {
        HashmapBase *hashmap;
        CacheMem keys, values;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size = sizeof(Hashmap),
                .entry_size = sizeof(struct plain_hashmap_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size = sizeof(OrderedHashmap),
                .entry_size = sizeof(struct ordered_hashmap_entry),
                .mempool = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size = sizeof(Set),
                .entry_size = sizeof(struct set_entry),
                .mempool = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};

#if VALGRIND
__attribute__((destructor)) static void cleanup_pools(void) {
        _cleanup_free_ char *t = NULL;
        int r;

        /* Be nice to valgrind */

        /* The pool is only allocated by the main thread, but the memory can
         * be passed to other threads. Let's clean up if we are the main thread
         * and no other threads are live. */
        if (!is_main_thread())
                return;

        r = get_proc_field("/proc/self/status", "Threads", WHITESPACE, &t);
        if (r < 0 || !streq(t, "1"))
                return;

        mempool_drop(&hashmap_pool);
        mempool_drop(&ordered_hashmap_pool);
}
#endif

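/* The helpers below dispatch between direct (in-place) and indirect
 * (separately allocated) storage, so the rest of the code does not need to
 * care which of the two is currently in use. */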
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

static void *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        struct siphash state;
        uint64_t hash;

        siphash24_init(&state, hash_key(h));

        h->hash_ops->hash(p, &state);

        hash = siphash24_finalize(&state);

        return (unsigned) (hash % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static inline void base_set_dirty(HashmapBase *h) {
        h->dirty = true;
}
#define hashmap_set_dirty(h) base_set_dirty(HASHMAP_BASE(h))

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}

static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                ((uint8_t*) storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}

static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                ((uint8_t*) storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

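/* Number of buckets between the initial bucket 'from' and 'idx', taking
 * wraparound at the end of the table into account. */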
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}

static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}

static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to   = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}

static unsigned next_idx(HashmapBase *h, unsigned idx) {
        return (idx + 1U) % n_buckets(h);
}

static unsigned prev_idx(HashmapBase *h, unsigned idx) {
        return (n_buckets(h) + idx - 1U) % n_buckets(h);
}

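/* For sets the stored key pointer doubles as the value; plain and ordered
 * hashmaps keep a separate value pointer in their entries. */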
static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {

        case HASHMAP_TYPE_PLAIN:
        case HASHMAP_TYPE_ORDERED:
                return ((struct plain_hashmap_entry*)e)->value;

        case HASHMAP_TYPE_SET:
                return (void*) e->key;

        default:
                assert_not_reached("Unknown hashmap type");
        }
}

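/* Removes the entry at 'idx' using backward shift deletion (see the Goossaert
 * reference above): rather than leaving a tombstone, every following entry up
 * to the next free or DIB == 0 bucket is shifted one bucket backwards and its
 * DIB decremented. */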
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#if ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (IN_SET(raw_dib, 0, DIB_RAW_FREE))
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        bucket_mark_free(h, prev);
        n_entries_dec(h);
        base_set_dirty(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)

static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#if ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}

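/* Dispatches iteration: insertion order for ordered hashmaps, internal bucket
 * order otherwise. In debug builds it also asserts that no entries were added,
 * and none removed except the current one, while iterating. */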
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#if ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}

bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
        struct hashmap_base_entry *e;
        void *data;
        unsigned idx;

        idx = hashmap_iterate_entry(h, i);
        if (idx == IDX_NIL) {
                if (value)
                        *value = NULL;
                if (key)
                        *key = NULL;

                return false;
        }

        e = bucket_at(h, idx);
        data = entry_value(h, e);
        if (value)
                *value = data;
        if (key)
                *key = e->key;

        return true;
}

bool set_iterate(Set *s, Iterator *i, void **value) {
        return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
}

#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

IteratedCache *internal_hashmap_iterated_cache_new(HashmapBase *h) {
        IteratedCache *cache;

        assert(h);
        assert(!h->cached);

        if (h->cached)
                return NULL;

        cache = new0(IteratedCache, 1);
        if (!cache)
                return NULL;

        cache->hashmap = h;
        h->cached = true;

        return cache;
}

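/* Zeroes the direct entry storage and memsets all direct DIBs to "free". */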
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}

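/* Allocates and minimally initializes a new hashmap head. The allocation comes
 * from the per-type mempool when called on the main thread, and from the heap
 * otherwise. */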
| 769 | static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) { | |||
| 770 | HashmapBase *h; | |||
| 771 | const struct hashmap_type_info *hi = &hashmap_type_info[type]; | |||
| 772 | bool_Bool use_pool; | |||
| 773 | ||||
| 774 | use_pool = is_main_thread(); | |||
| 775 | ||||
| 776 | h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size)(calloc(1, (hi->head_size))); | |||
| 777 | ||||
| 778 | if (!h) | |||
| 779 | return NULL((void*)0); | |||
| 780 | ||||
| 781 | h->type = type; | |||
| 782 | h->from_pool = use_pool; | |||
| 783 | h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops; | |||
| 784 | ||||
| 785 | if (type == HASHMAP_TYPE_ORDERED) { | |||
| 786 | OrderedHashmap *lh = (OrderedHashmap*)h; | |||
| 787 | lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL(2147483647 *2U +1U); | |||
| 788 | } | |||
| 789 | ||||
| 790 | reset_direct_storage(h); | |||
| 791 | ||||
| 792 | if (!shared_hash_key_initialized) { | |||
| 793 | random_bytes(shared_hash_key, sizeof(shared_hash_key)); | |||
| 794 | shared_hash_key_initialized= true1; | |||
| 795 | } | |||
| 796 | ||||
| 797 | #if ENABLE_DEBUG_HASHMAP0 | |||
| 798 | h->debug.func = func; | |||
| 799 | h->debug.file = file; | |||
| 800 | h->debug.line = line; | |||
| 801 | assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0)do { if ((__builtin_expect(!!(!(pthread_mutex_lock(&hashmap_debug_list_mutex ) == 0)),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("pthread_mutex_lock(&hashmap_debug_list_mutex) == 0" ), "../src/basic/hashmap.c", 801, __PRETTY_FUNCTION__); } while (0); | |||
| 802 | LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug); | |||
| 803 | assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0)do { if ((__builtin_expect(!!(!(pthread_mutex_unlock(&hashmap_debug_list_mutex ) == 0)),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0" ), "../src/basic/hashmap.c", 803, __PRETTY_FUNCTION__); } while (0); | |||
| 804 | #endif | |||
| 805 | ||||
| 806 | return h; | |||
| 807 | } | |||
| 808 | ||||
| 809 | Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) { | |||
| 810 | return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS); | |||
| 811 | } | |||
| 812 | ||||
| 813 | OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) { | |||
| 814 | return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS); | |||
| 815 | } | |||
| 816 | ||||
| 817 | Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) { | |||
| 818 | return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS); | |||
| 819 | } | |||
| 820 | ||||
| 821 | static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops, | |||
| 822 | enum HashmapType type HASHMAP_DEBUG_PARAMS) { | |||
| 823 | HashmapBase *q; | |||
| 824 | ||||
| 825 | assert(h)do { if ((__builtin_expect(!!(!(h)),0))) log_assert_failed_realm (LOG_REALM_SYSTEMD, ("h"), "../src/basic/hashmap.c", 825, __PRETTY_FUNCTION__ ); } while (0); | |||
| 826 | ||||
| 827 | if (*h) | |||
| 828 | return 0; | |||
| 829 | ||||
| 830 | q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS); | |||
| 831 | if (!q) | |||
| 832 | return -ENOMEM12; | |||
| 833 | ||||
| 834 | *h = q; | |||
| 835 | return 0; | |||
| 836 | } | |||
| 837 | ||||
| 838 | int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) { | |||
| 839 | return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS); | |||
| 840 | } | |||
| 841 | ||||
| 842 | int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) { | |||
| 843 | return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS); | |||
| 844 | } | |||
| 845 | ||||
| 846 | int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) { | |||
| 847 | return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS); | |||
| 848 | } | |||
| 849 | ||||
| 850 | static void hashmap_free_no_clear(HashmapBase *h) { | |||
| 851 | assert(!h->has_indirect)do { if ((__builtin_expect(!!(!(!h->has_indirect)),0))) log_assert_failed_realm (LOG_REALM_SYSTEMD, ("!h->has_indirect"), "../src/basic/hashmap.c" , 851, __PRETTY_FUNCTION__); } while (0); | |||
| 852 | assert(!h->n_direct_entries)do { if ((__builtin_expect(!!(!(!h->n_direct_entries)),0)) ) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("!h->n_direct_entries" ), "../src/basic/hashmap.c", 852, __PRETTY_FUNCTION__); } while (0); | |||
| 853 | ||||
| 854 | #if ENABLE_DEBUG_HASHMAP0 | |||
| 855 | assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0)do { if ((__builtin_expect(!!(!(pthread_mutex_lock(&hashmap_debug_list_mutex ) == 0)),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("pthread_mutex_lock(&hashmap_debug_list_mutex) == 0" ), "../src/basic/hashmap.c", 855, __PRETTY_FUNCTION__); } while (0); | |||
| 856 | LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug); | |||
| 857 | assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0)do { if ((__builtin_expect(!!(!(pthread_mutex_unlock(&hashmap_debug_list_mutex ) == 0)),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0" ), "../src/basic/hashmap.c", 857, __PRETTY_FUNCTION__); } while (0); | |||
| 858 | #endif | |||
| 859 | ||||
| 860 | if (h->from_pool) | |||
| 861 | mempool_free_tile(hashmap_type_info[h->type].mempool, h); | |||
| 862 | else | |||
| 863 | free(h); | |||
| 864 | } | |||
| 865 | ||||
| 866 | HashmapBase *internal_hashmap_free(HashmapBase *h) { | |||
| 867 | ||||
| 868 | /* Free the hashmap, but nothing in it */ | |||
| 869 | ||||
| 870 | if (h) { | |||
| 871 | internal_hashmap_clear(h); | |||
| 872 | hashmap_free_no_clear(h); | |||
| 873 | } | |||
| 874 | ||||
| 875 | return NULL((void*)0); | |||
| 876 | } | |||
| 877 | ||||
| 878 | HashmapBase *internal_hashmap_free_free(HashmapBase *h) { | |||
| 879 | ||||
| 880 | /* Free the hashmap and all data objects in it, but not the | |||
| 881 | * keys */ | |||
| 882 | ||||
| 883 | if (h) { | |||
| 884 | internal_hashmap_clear_free(h); | |||
| 885 | hashmap_free_no_clear(h); | |||
| 886 | } | |||
| 887 | ||||
| 888 | return NULL((void*)0); | |||
| 889 | } | |||
| 890 | ||||
| 891 | Hashmap *hashmap_free_free_free(Hashmap *h) { | |||
| 892 | ||||
| 893 | /* Free the hashmap and all data and key objects in it */ | |||
| 894 | ||||
| 895 | if (h) { | |||
| 896 | hashmap_clear_free_free(h); | |||
| 897 | hashmap_free_no_clear(HASHMAP_BASE(h)__builtin_choose_expr((__builtin_types_compatible_p(typeof(h) , HashmapBase*) || __builtin_types_compatible_p(typeof(h), Hashmap *) || __builtin_types_compatible_p(typeof(h), OrderedHashmap* ) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0)); | |||
| 898 | } | |||
| 899 | ||||
| 900 | return NULL((void*)0); | |||
| 901 | } | |||
| 902 | ||||
| 903 | void internal_hashmap_clear(HashmapBase *h) { | |||
| 904 | if (!h) | |||
| 905 | return; | |||
| 906 | ||||
| 907 | if (h->has_indirect) { | |||
| 908 | free(h->indirect.storage); | |||
| 909 | h->has_indirect = false0; | |||
| 910 | } | |||
| 911 | ||||
| 912 | h->n_direct_entries = 0; | |||
| 913 | reset_direct_storage(h); | |||
| 914 | ||||
| 915 | if (h->type == HASHMAP_TYPE_ORDERED) { | |||
| 916 | OrderedHashmap *lh = (OrderedHashmap*) h; | |||
| 917 | lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL(2147483647 *2U +1U); | |||
| 918 | } | |||
| 919 | ||||
| 920 | base_set_dirty(h); | |||
| 921 | } | |||
| 922 | ||||
| 923 | void internal_hashmap_clear_free(HashmapBase *h) { | |||
| 924 | unsigned idx; | |||
| 925 | ||||
| 926 | if (!h) | |||
| 927 | return; | |||
| 928 | ||||
| 929 | for (idx = skip_free_buckets(h, 0); idx != IDX_NIL(2147483647 *2U +1U); | |||
| 930 | idx = skip_free_buckets(h, idx + 1)) | |||
| 931 | free(entry_value(h, bucket_at(h, idx))); | |||
| 932 | ||||
| 933 | internal_hashmap_clear(h); | |||
| 934 | } | |||
| 935 | ||||
| 936 | void hashmap_clear_free_free(Hashmap *h) { | |||
| 937 | unsigned idx; | |||
| 938 | ||||
| 939 | if (!h) | |||
| 940 | return; | |||
| 941 | ||||
| 942 | for (idx = skip_free_buckets(HASHMAP_BASE(h)__builtin_choose_expr((__builtin_types_compatible_p(typeof(h) , HashmapBase*) || __builtin_types_compatible_p(typeof(h), Hashmap *) || __builtin_types_compatible_p(typeof(h), OrderedHashmap* ) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), 0); idx != IDX_NIL(2147483647 *2U +1U); | |||
| 943 | idx = skip_free_buckets(HASHMAP_BASE(h)__builtin_choose_expr((__builtin_types_compatible_p(typeof(h) , HashmapBase*) || __builtin_types_compatible_p(typeof(h), Hashmap *) || __builtin_types_compatible_p(typeof(h), OrderedHashmap* ) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), idx + 1)) { | |||
| 944 | struct plain_hashmap_entry *e = plain_bucket_at(h, idx); | |||
| 945 | free((void*)e->b.key); | |||
| 946 | free(e->value); | |||
| 947 | } | |||
| 948 | ||||
| 949 | internal_hashmap_clear(HASHMAP_BASE(h)__builtin_choose_expr((__builtin_types_compatible_p(typeof(h) , HashmapBase*) || __builtin_types_compatible_p(typeof(h), Hashmap *) || __builtin_types_compatible_p(typeof(h), OrderedHashmap* ) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0)); | |||
| 950 | } | |||
| 951 | ||||
| 952 | static int resize_buckets(HashmapBase *h, unsigned entries_add); | |||
| 953 | ||||
| 954 | /* | |||
| 955 | * Finds an empty bucket to put an entry into, starting the scan at 'idx'. | |||
| 956 | * Performs Robin Hood swaps as it goes. The entry to put must be placed | |||
| 957 | * by the caller into swap slot IDX_PUT. | |||
| 958 | * If used for in-place resizing, may leave a displaced entry in swap slot | |||
| 959 | * IDX_PUT. Caller must rehash it next. | |||
| 960 | * Returns: true if it left a displaced entry to rehash next in IDX_PUT, | |||
| 961 | * false otherwise. | |||
| 962 | */ | |||
| 963 | static bool_Bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx, | |||
| 964 | struct swap_entries *swap) { | |||
| 965 | dib_raw_t raw_dib, *dibs; | |||
| 966 | unsigned dib, distance; | |||
| 967 | ||||
| 968 | #if ENABLE_DEBUG_HASHMAP0 | |||
| 969 | h->debug.put_count++; | |||
| 970 | #endif | |||
| 971 | ||||
| 972 | dibs = dib_raw_ptr(h); | |||
| 973 | ||||
| 974 | for (distance = 0; ; distance++) { | |||
| 975 | raw_dib = dibs[idx]; | |||
| 976 | if (IN_SET(raw_dib, DIB_RAW_FREE, DIB_RAW_REHASH)({ _Bool _found = 0; static __attribute__ ((unused)) char _static_assert__macros_need_to_be_extended [20 - sizeof((int[]){((dib_raw_t)0xffU), ((dib_raw_t)0xfeU)}) /sizeof(int)]; switch(raw_dib) { case ((dib_raw_t)0xffU): case ((dib_raw_t)0xfeU): _found = 1; break; default: break; } _found ; })) { | |||
| 977 | if (raw_dib == DIB_RAW_REHASH((dib_raw_t)0xfeU)) | |||
| 978 | bucket_move_entry(h, swap, idx, IDX_TMP(((2147483647 *2U +1U) - 3) + 1)); | |||
| 979 | ||||
| 980 | if (h->has_indirect && h->indirect.idx_lowest_entry > idx) | |||
| 981 | h->indirect.idx_lowest_entry = idx; | |||
| 982 | ||||
| 983 | bucket_set_dib(h, idx, distance); | |||
| 984 | bucket_move_entry(h, swap, IDX_PUT(((2147483647 *2U +1U) - 3) + 0), idx); | |||
| 985 | if (raw_dib == DIB_RAW_REHASH((dib_raw_t)0xfeU)) { | |||
| 986 | bucket_move_entry(h, swap, IDX_TMP(((2147483647 *2U +1U) - 3) + 1), IDX_PUT(((2147483647 *2U +1U) - 3) + 0)); | |||
| 987 | return true1; | |||
| 988 | } | |||
| 989 | ||||
| 990 | return false0; | |||
| 991 | } | |||
| 992 | ||||
| 993 | dib = bucket_calculate_dib(h, idx, raw_dib); | |||
| 994 | ||||
| 995 | if (dib < distance) { | |||
| 996 | /* Found a wealthier entry. Go Robin Hood! */ | |||
| 997 | bucket_set_dib(h, idx, distance); | |||
| 998 | ||||
| 999 | /* swap the entries */ | |||
| 1000 | bucket_move_entry(h, swap, idx, IDX_TMP(((2147483647 *2U +1U) - 3) + 1)); | |||
| 1001 | bucket_move_entry(h, swap, IDX_PUT(((2147483647 *2U +1U) - 3) + 0), idx); | |||
| 1002 | bucket_move_entry(h, swap, IDX_TMP(((2147483647 *2U +1U) - 3) + 1), IDX_PUT(((2147483647 *2U +1U) - 3) + 0)); | |||
| 1003 | ||||
| 1004 | distance = dib; | |||
| 1005 | } | |||
| 1006 | ||||
| 1007 | idx = next_idx(h, idx); | |||
| 1008 | } | |||
| 1009 | } | |||
| 1010 |
| 1011 | /*
| 1012 |  * Puts an entry into a hashmap, boldly - no check whether key already exists.
| 1013 |  * The caller must place the entry (only its key and value, not link indexes)
| 1014 |  * in swap slot IDX_PUT.
| 1015 |  * Caller must ensure: the key does not exist yet in the hashmap.
| 1016 |  *                     that resize is not needed if !may_resize.
| 1017 |  * Returns: 1 if entry was put successfully.
| 1018 |  *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
| 1019 |  *          Cannot return -ENOMEM if !may_resize.
| 1020 |  */
| 1021 | static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
| 1022 |                                    struct swap_entries *swap, bool may_resize) {
| 1023 |         struct ordered_hashmap_entry *new_entry;
| 1024 |         int r;
| 1025 |
| 1026 |         assert(idx < n_buckets(h));
| 1027 |
| 1028 |         new_entry = bucket_at_swap(swap, IDX_PUT);
| 1029 |
| 1030 |         if (may_resize) {
| 1031 |                 r = resize_buckets(h, 1);
| 1032 |                 if (r < 0)
| 1033 |                         return r;
| 1034 |                 if (r > 0)
| 1035 |                         idx = bucket_hash(h, new_entry->p.b.key);
| 1036 |         }
| 1037 |         assert(n_entries(h) < n_buckets(h));
| 1038 |
| 1039 |         if (h->type == HASHMAP_TYPE_ORDERED) {
| 1040 |                 OrderedHashmap *lh = (OrderedHashmap*) h;
| 1041 |
| 1042 |                 new_entry->iterate_next = IDX_NIL;
| 1043 |                 new_entry->iterate_previous = lh->iterate_list_tail;
| 1044 |
| 1045 |                 if (lh->iterate_list_tail != IDX_NIL) {
| 1046 |                         struct ordered_hashmap_entry *old_tail;
| 1047 |
| 1048 |                         old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
| 1049 |                         assert(old_tail->iterate_next == IDX_NIL);
| 1050 |                         old_tail->iterate_next = IDX_PUT;
| 1051 |                 }
| 1052 |
| 1053 |                 lh->iterate_list_tail = IDX_PUT;
| 1054 |                 if (lh->iterate_list_head == IDX_NIL)
| 1055 |                         lh->iterate_list_head = IDX_PUT;
| 1056 |         }
| 1057 |
| 1058 |         assert_se(hashmap_put_robin_hood(h, idx, swap) == false);
| 1059 |
| 1060 |         n_entries_inc(h);
| 1061 | #if ENABLE_DEBUG_HASHMAP
| 1062 |         h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
| 1063 | #endif
| 1064 |
| 1065 |         base_set_dirty(h);
| 1066 |
| 1067 |         return 1;
| 1068 | }
| 1069 | #define hashmap_put_boldly(h, idx, swap, may_resize) \
| 1070 |         hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
| 1071 |
| 1072 | /*
| 1073 |  * Returns 0 if resize is not needed.
| 1074 |  *         1 if successfully resized.
| 1075 |  *         -ENOMEM on allocation failure.
| 1076 |  */
| 1077 | static int resize_buckets(HashmapBase *h, unsigned entries_add) {
| 1078 |         struct swap_entries swap;
| 1079 |         void *new_storage;
| 1080 |         dib_raw_t *old_dibs, *new_dibs;
| 1081 |         const struct hashmap_type_info *hi;
| 1082 |         unsigned idx, optimal_idx;
| 1083 |         unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
| 1084 |         uint8_t new_shift;
| 1085 |         bool rehash_next;
| 1086 |
| 1087 |         assert(h);
| 1088 |
| 1089 |         hi = &hashmap_type_info[h->type];
| 1090 |         new_n_entries = n_entries(h) + entries_add;
| 1091 |
| 1092 |         /* overflow? */
| 1093 |         if (_unlikely_(new_n_entries < entries_add))
| 1094 |                 return -ENOMEM;
| 1095 |
| 1096 |         /* For direct storage we allow 100% load, because it's tiny. */
| 1097 |         if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
| 1098 |                 return 0;
| 1099 |
| 1100 |         /*
| 1101 |          * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
| 1102 |          * From it follows: m = n + n/(INV_KEEP_FREE - 1)
| 1103 |          */
| 1104 |         new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
| 1105 |         /* overflow? */
| 1106 |         if (_unlikely_(new_n_buckets < new_n_entries))
| 1107 |                 return -ENOMEM;
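| | /*
| |  * Editorial note (not part of the original source): worked example of the
| |  * sizing formula above. INV_KEEP_FREE expands to 5 here, so the target load
| |  * factor is 1 - 1/5 = 0.8; for new_n_entries == 8 this gives
| |  * new_n_buckets = 8 + 8/(5 - 1) = 10, i.e. at most 8 of 10 buckets occupied.
| |  */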
| 1108 |
| 1109 |         if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
| 1110 |                 return -ENOMEM;
| 1111 |
| 1112 |         old_n_buckets = n_buckets(h);
| 1113 |
| 1114 |         if (_likely_(new_n_buckets <= old_n_buckets))
| 1115 |                 return 0;
| 1116 |
| 1117 |         new_shift = log2u_round_up(MAX(
| 1118 |                         new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
| 1119 |                         2 * sizeof(struct direct_storage)));
| 1120 |
| 1121 |         /* Realloc storage (buckets and DIB array). */
| 1122 |         new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
| 1123 |                               1U << new_shift);
| 1124 |         if (!new_storage)
| 1125 |                 return -ENOMEM;
| 1126 |
| 1127 |         /* Must upgrade direct to indirect storage. */
| 1128 |         if (!h->has_indirect) {
| 1129 |                 memcpy(new_storage, h->direct.storage,
| 1130 |                        old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
| 1131 |                 h->indirect.n_entries = h->n_direct_entries;
| 1132 |                 h->indirect.idx_lowest_entry = 0;
| 1133 |                 h->n_direct_entries = 0;
| 1134 |         }
| 1135 |
| 1136 |         /* Get a new hash key. If we've just upgraded to indirect storage,
| 1137 |          * allow reusing a previously generated key. It's still a different key
| 1138 |          * from the shared one that we used for direct storage. */
| 1139 |         get_hash_key(h->indirect.hash_key, !h->has_indirect);
| 1140 |
| 1141 |         h->has_indirect = true;
| 1142 |         h->indirect.storage = new_storage;
| 1143 |         h->indirect.n_buckets = (1U << new_shift) /
| 1144 |                                 (hi->entry_size + sizeof(dib_raw_t));
| 1145 |
| 1146 |         old_dibs = (dib_raw_t*)((uint8_t*) new_storage + hi->entry_size * old_n_buckets);
| 1147 |         new_dibs = dib_raw_ptr(h);
| 1148 |
| 1149 |         /*
| 1150 |          * Move the DIB array to the new place, replacing valid DIB values with
| 1151 |          * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
| 1152 |          * Note: Overlap is not possible, because we have at least doubled the
| 1153 |          * number of buckets and dib_raw_t is smaller than any entry type.
| 1154 |          */
| 1155 |         for (idx = 0; idx < old_n_buckets; idx++) {
| 1156 |                 assert(old_dibs[idx] != DIB_RAW_REHASH);
| 1157 |                 new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
| 1158 |                                                               : DIB_RAW_REHASH;
| 1159 |         }
| 1160 |
| 1161 |         /* Zero the area of newly added entries (including the old DIB area) */
| 1162 |         memzero(bucket_at(h, old_n_buckets),
| 1163 |                 (n_buckets(h) - old_n_buckets) * hi->entry_size);
| 1164 |
| 1165 |         /* The upper half of the new DIB array needs initialization */
| 1166 |         memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
| 1167 |                (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));
| 1168 |
| 1169 |         /* Rehash entries that need it */
| 1170 |         n_rehashed = 0;
| 1171 |         for (idx = 0; idx < old_n_buckets; idx++) {
| 1172 |                 if (new_dibs[idx] != DIB_RAW_REHASH)
| 1173 |                         continue;
| 1174 |
| 1175 |                 optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);
| 1176 |
| 1177 |                 /*
| 1178 |                  * Not much to do if by luck the entry hashes to its current
| 1179 |                  * location. Just set its DIB.
| 1180 |                  */
| 1181 |                 if (optimal_idx == idx) {
| 1182 |                         new_dibs[idx] = 0;
| 1183 |                         n_rehashed++;
| 1184 |                         continue;
| 1185 |                 }
| 1186 |
| 1187 |                 new_dibs[idx] = DIB_RAW_FREE;
| 1188 |                 bucket_move_entry(h, &swap, idx, IDX_PUT);
| 1189 |                 /* bucket_move_entry does not clear the source */
| 1190 |                 memzero(bucket_at(h, idx), hi->entry_size);
| 1191 |
| 1192 |                 do {
| 1193 |                         /*
| 1194 |                          * Find the new bucket for the current entry. This may make
| 1195 |                          * another entry homeless and load it into IDX_PUT.
| 1196 |                          */
| 1197 |                         rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
| 1198 |                         n_rehashed++;
| 1199 |
| 1200 |                         /* Did the current entry displace another one? */
| 1201 |                         if (rehash_next)
| 1202 |                                 optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
| 1203 |                 } while (rehash_next);
| 1204 |         }
| 1205 |
| 1206 |         assert(n_rehashed == n_entries(h));
| 1207 |
| 1208 |         return 1;
| 1209 | }
| 1210 |
| 1211 | /*
| 1212 |  * Finds an entry with a matching key
| 1213 |  * Returns: index of the found entry, or IDX_NIL if not found.
| 1214 |  */
| 1215 | static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
| 1216 |         struct hashmap_base_entry *e;
| 1217 |         unsigned dib, distance;
| 1218 |         dib_raw_t *dibs = dib_raw_ptr(h);
| 1219 |
| 1220 |         assert(idx < n_buckets(h));
| 1221 |
| 1222 |         for (distance = 0; ; distance++) {
| 1223 |                 if (dibs[idx] == DIB_RAW_FREE)
| 1224 |                         return IDX_NIL;
| 1225 |
| 1226 |                 dib = bucket_calculate_dib(h, idx, dibs[idx]);
| 1227 |
| 1228 |                 if (dib < distance)
| 1229 |                         return IDX_NIL;
| 1230 |                 if (dib == distance) {
| 1231 |                         e = bucket_at(h, idx);
| 1232 |                         if (h->hash_ops->compare(e->key, key) == 0)
| 1233 |                                 return idx;
| 1234 |                 }
| 1235 |
| 1236 |                 idx = next_idx(h, idx);
| 1237 |         }
| 1238 | }
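| | /*
| |  * Editorial note (not part of the original source): the "dib < distance"
| |  * early exit above is what keeps unsuccessful lookups short. If the occupant
| |  * of the current bucket sits closer to its optimal bucket than we have
| |  * already probed, the sought key cannot be stored further along this chain:
| |  * had it been inserted, the Robin Hood rule in hashmap_put_robin_hood()
| |  * would have displaced this wealthier occupant. So the scan stops and
| |  * reports IDX_NIL.
| |  */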
| 1239 | #define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
| 1240 |
| 1241 | int hashmap_put(Hashmap *h, const void *key, void *value) {
| 1242 |         struct swap_entries swap;
| 1243 |         struct plain_hashmap_entry *e;
| 1244 |         unsigned hash, idx;
| 1245 |
| 1246 |         assert(h);
| 1247 |
| 1248 |         hash = bucket_hash(h, key);
| 1249 |         idx = bucket_scan(h, hash, key);
| 1250 |         if (idx != IDX_NIL) {
| 1251 |                 e = plain_bucket_at(h, idx);
| 1252 |                 if (e->value == value)
| 1253 |                         return 0;
| 1254 |                 return -EEXIST;
| 1255 |         }
| 1256 |
| 1257 |         e = &bucket_at_swap(&swap, IDX_PUT)->p;
| 1258 |         e->b.key = key;
| 1259 |         e->value = value;
| 1260 |         return hashmap_put_boldly(h, hash, &swap, true);
| 1261 | }
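| | /* Editorial illustration (not part of the original hashmap.c): a usage sketch
| |  * of the hashmap_put() return convention implemented above: 1 when a new
| |  * entry is stored, 0 when the key already maps to the same value, -EEXIST
| |  * when it maps to a different one. The Hashmap and the pointers are assumed
| |  * to be supplied by the caller; demo_put is a hypothetical name. The block is
| |  * kept under "#if 0" so it is clearly not part of the compiled file. */
| | #if 0
| | static int demo_put(Hashmap *h, const char *key, void *a, void *b) {
| |         int r;
| |
| |         assert(a != b);
| |
| |         r = hashmap_put(h, key, a);
| |         if (r < 0)
| |                 return r;                          /* e.g. -ENOMEM */
| |
| |         assert(hashmap_put(h, key, a) == 0);       /* same key, same value */
| |         assert(hashmap_put(h, key, b) == -EEXIST); /* same key, other value */
| |
| |         return 0;
| | }
| | #endif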
| 1262 |
| 1263 | int set_put(Set *s, const void *key) {
| 1264 |         struct swap_entries swap;
| 1265 |         struct hashmap_base_entry *e;
| 1266 |         unsigned hash, idx;
| 1267 |
| 1268 |         assert(s);
| 1269 |
| 1270 |         hash = bucket_hash(s, key);
| 1271 |         idx = bucket_scan(s, hash, key);
| 1272 |         if (idx != IDX_NIL)
| 1273 |                 return 0;
| 1274 |
| 1275 |         e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
| 1276 |         e->key = key;
| 1277 |         return hashmap_put_boldly(s, hash, &swap, true);
| 1278 | }
| 1279 |
| 1280 | int hashmap_replace(Hashmap *h, const void *key, void *value) {
| 1281 |         struct swap_entries swap;
| 1282 |         struct plain_hashmap_entry *e;
| 1283 |         unsigned hash, idx;
| 1284 |
| 1285 |         assert(h);
| 1286 |
| 1287 |         hash = bucket_hash(h, key);
| 1288 |         idx = bucket_scan(h, hash, key);
| 1289 |         if (idx != IDX_NIL) {
| 1290 |                 e = plain_bucket_at(h, idx);
| 1291 | #if ENABLE_DEBUG_HASHMAP
| 1292 |                 /* Although the key is equal, the key pointer may have changed,
| 1293 |                  * and this would break our assumption for iterating. So count
| 1294 |                  * this operation as incompatible with iteration. */
| 1295 |                 if (e->b.key != key) {
| 1296 |                         h->b.debug.put_count++;
| 1297 |                         h->b.debug.rem_count++;
| 1298 |                         h->b.debug.last_rem_idx = idx;
| 1299 |                 }
| 1300 | #endif
| 1301 |                 e->b.key = key;
| 1302 |                 e->value = value;
| 1303 |                 hashmap_set_dirty(h);
| 1304 |
| 1305 |                 return 0;
| 1306 |         }
| 1307 |
| 1308 |         e = &bucket_at_swap(&swap, IDX_PUT)->p;
| 1309 |         e->b.key = key;
| 1310 |         e->value = value;
| 1311 |         return hashmap_put_boldly(h, hash, &swap, true);
| 1312 | }
| 1313 |
| 1314 | int hashmap_update(Hashmap *h, const void *key, void *value) {
| 1315 |         struct plain_hashmap_entry *e;
| 1316 |         unsigned hash, idx;
| 1317 |
| 1318 |         assert(h);
| 1319 |
| 1320 |         hash = bucket_hash(h, key);
| 1321 |         idx = bucket_scan(h, hash, key);
| 1322 |         if (idx == IDX_NIL)
| 1323 |                 return -ENOENT;
| 1324 |
| 1325 |         e = plain_bucket_at(h, idx);
| 1326 |         e->value = value;
| 1327 |         hashmap_set_dirty(h);
| 1328 |
| 1329 |         return 0;
| 1330 | }
| 1331 |
| 1332 | void *internal_hashmap_get(HashmapBase *h, const void *key) {
| 1333 |         struct hashmap_base_entry *e;
| 1334 |         unsigned hash, idx;
| 1335 |
| 1336 |         if (!h)
| 1337 |                 return NULL;
| 1338 |
| 1339 |         hash = bucket_hash(h, key);
| 1340 |         idx = bucket_scan(h, hash, key);
| 1341 |         if (idx == IDX_NIL)
| 1342 |                 return NULL;
| 1343 |
| 1344 |         e = bucket_at(h, idx);
| 1345 |         return entry_value(h, e);
| 1346 | }
| 1347 |
| 1348 | void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
| 1349 |         struct plain_hashmap_entry *e;
| 1350 |         unsigned hash, idx;
| 1351 |
| 1352 |         if (!h)
| 1353 |                 return NULL;
| 1354 |
| 1355 |         hash = bucket_hash(h, key);
| 1356 |         idx = bucket_scan(h, hash, key);
| 1357 |         if (idx == IDX_NIL)
| 1358 |                 return NULL;
| 1359 |
| 1360 |         e = plain_bucket_at(h, idx);
| 1361 |         if (key2)
| 1362 |                 *key2 = (void*) e->b.key;
| 1363 |
| 1364 |         return e->value;
| 1365 | }
| 1366 |
| 1367 | bool internal_hashmap_contains(HashmapBase *h, const void *key) {
| 1368 |         unsigned hash;
| 1369 |
| 1370 |         if (!h)
| 1371 |                 return false;
| 1372 |
| 1373 |         hash = bucket_hash(h, key);
| 1374 |         return bucket_scan(h, hash, key) != IDX_NIL;
| 1375 | }
| 1376 |
| 1377 | void *internal_hashmap_remove(HashmapBase *h, const void *key) {
| 1378 |         struct hashmap_base_entry *e;
| 1379 |         unsigned hash, idx;
| 1380 |         void *data;
| 1381 |
| 1382 |         if (!h)
| 1383 |                 return NULL;
| 1384 |
| 1385 |         hash = bucket_hash(h, key);
| 1386 |         idx = bucket_scan(h, hash, key);
| 1387 |         if (idx == IDX_NIL)
| 1388 |                 return NULL;
| 1389 |
| 1390 |         e = bucket_at(h, idx);
| 1391 |         data = entry_value(h, e);
| 1392 |         remove_entry(h, idx);
| 1393 |
| 1394 |         return data;
| 1395 | }
| 1396 |
| 1397 | void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
| 1398 |         struct plain_hashmap_entry *e;
| 1399 |         unsigned hash, idx;
| 1400 |         void *data;
| 1401 |
| 1402 |         if (!h) {
| 1403 |                 if (rkey)
| 1404 |                         *rkey = NULL;
| 1405 |                 return NULL;
| 1406 |         }
| 1407 |
| 1408 |         hash = bucket_hash(h, key);
| 1409 |         idx = bucket_scan(h, hash, key);
| 1410 |         if (idx == IDX_NIL) {
| 1411 |                 if (rkey)
| 1412 |                         *rkey = NULL;
| 1413 |                 return NULL;
| 1414 |         }
| 1415 |
| 1416 |         e = plain_bucket_at(h, idx);
| 1417 |         data = e->value;
| 1418 |         if (rkey)
| 1419 |                 *rkey = (void*) e->b.key;
| 1420 |
| 1421 |         remove_entry(h, idx);
| 1422 |
| 1423 |         return data;
| 1424 | }
| 1425 |
| 1426 | int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
| 1427 |         struct swap_entries swap;
| 1428 |         struct plain_hashmap_entry *e;
| 1429 |         unsigned old_hash, new_hash, idx;
| 1430 |
| 1431 |         if (!h)
| 1432 |                 return -ENOENT;
| 1433 |
| 1434 |         old_hash = bucket_hash(h, old_key);
| 1435 |         idx = bucket_scan(h, old_hash, old_key);
| 1436 |         if (idx == IDX_NIL)
| 1437 |                 return -ENOENT;
| 1438 |
| 1439 |         new_hash = bucket_hash(h, new_key);
| 1440 |         if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
| 1441 |                 return -EEXIST;
| 1442 |
| 1443 |         remove_entry(h, idx);
| 1444 |
| 1445 |         e = &bucket_at_swap(&swap, IDX_PUT)->p;
| 1446 |         e->b.key = new_key;
| 1447 |         e->value = value;
| 1448 |         assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
| 1449 |
| 1450 |         return 0;
| 1451 | }
| 1452 |
| 1453 | int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
| 1454 |         struct swap_entries swap;
| 1455 |         struct hashmap_base_entry *e;
| 1456 |         unsigned old_hash, new_hash, idx;
| 1457 |
| 1458 |         if (!s)
| 1459 |                 return -ENOENT;
| 1460 |
| 1461 |         old_hash = bucket_hash(s, old_key);
| 1462 |         idx = bucket_scan(s, old_hash, old_key);
| 1463 |         if (idx == IDX_NIL)
| 1464 |                 return -ENOENT;
| 1465 |
| 1466 |         new_hash = bucket_hash(s, new_key);
| 1467 |         if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
| 1468 |                 return -EEXIST;
| 1469 |
| 1470 |         remove_entry(s, idx);
| 1471 |
| 1472 |         e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
| 1473 |         e->key = new_key;
| 1474 |         assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);
| 1475 |
| 1476 |         return 0;
| 1477 | }
| 1478 |
| 1479 | int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
| 1480 |         struct swap_entries swap;
| 1481 |         struct plain_hashmap_entry *e;
| 1482 |         unsigned old_hash, new_hash, idx_old, idx_new;
| 1483 |
| 1484 |         if (!h)
| 1485 |                 return -ENOENT;
| 1486 |
| 1487 |         old_hash = bucket_hash(h, old_key);
| 1488 |         idx_old = bucket_scan(h, old_hash, old_key);
| 1489 |         if (idx_old == IDX_NIL)
| 1490 |                 return -ENOENT;
| 1491 |
| 1492 |         old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;
| 1493 |
| 1494 |         new_hash = bucket_hash(h, new_key);
| 1495 |         idx_new = bucket_scan(h, new_hash, new_key);
| 1496 |         if (idx_new != IDX_NIL)
| 1497 |                 if (idx_old != idx_new) {
| 1498 |                         remove_entry(h, idx_new);
| 1499 |                         /* Compensate for a possible backward shift. */
| 1500 |                         if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
| 1501 |                                 idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
| 1502 |                         assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
| 1503 |                 }
| 1504 |
| 1505 |         remove_entry(h, idx_old);
| 1506 |
| 1507 |         e = &bucket_at_swap(&swap, IDX_PUT)->p;
| 1508 |         e->b.key = new_key;
| 1509 |         e->value = value;
| 1510 |         assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);
| 1511 |
| 1512 |         return 0;
| 1513 | }
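| | /*
| |  * Editorial note (not part of the original source): the prev_idx()
| |  * compensation above accounts for backward shift deletion. Removing the
| |  * entry at idx_new may shift the entries that follow it in the probe chain
| |  * back by one bucket; if old_key's entry was part of that chain, it now
| |  * lives one bucket before the idx_old recorded earlier, which is what the
| |  * key-pointer comparison detects before the second removal.
| |  */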
| 1514 |
| 1515 | void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
| 1516 |         struct plain_hashmap_entry *e;
| 1517 |         unsigned hash, idx;
| 1518 |
| 1519 |         if (!h)
| 1520 |                 return NULL;
| 1521 |
| 1522 |         hash = bucket_hash(h, key);
| 1523 |         idx = bucket_scan(h, hash, key);
| 1524 |         if (idx == IDX_NIL)
| 1525 |                 return NULL;
| 1526 |
| 1527 |         e = plain_bucket_at(h, idx);
| 1528 |         if (e->value != value)
| 1529 |                 return NULL;
| 1530 |
| 1531 |         remove_entry(h, idx);
| 1532 |
| 1533 |         return value;
| 1534 | }
| 1535 |
| 1536 | static unsigned find_first_entry(HashmapBase *h) {
| 1537 |         Iterator i = ITERATOR_FIRST;
| 1538 |
| 1539 |         if (!h || !n_entries(h))
| 1540 |                 return IDX_NIL;
| 1541 |
| 1542 |         return hashmap_iterate_entry(h, &i);
| 1543 | }
| 1544 |
| 1545 | void *internal_hashmap_first(HashmapBase *h) {
| 1546 |         unsigned idx;
| 1547 |
| 1548 |         idx = find_first_entry(h);
| 1549 |         if (idx == IDX_NIL)
| 1550 |                 return NULL;
| 1551 |
| 1552 |         return entry_value(h, bucket_at(h, idx));
| 1553 | }
| 1554 |
| 1555 | void *internal_hashmap_first_key(HashmapBase *h) {
| 1556 |         struct hashmap_base_entry *e;
| 1557 |         unsigned idx;
| 1558 |
| 1559 |         idx = find_first_entry(h);
| 1560 |         if (idx == IDX_NIL)
| 1561 |                 return NULL;
| 1562 |
| 1563 |         e = bucket_at(h, idx);
| 1564 |         return (void*) e->key;
| 1565 | }
| 1566 |
| 1567 | void *internal_hashmap_steal_first(HashmapBase *h) {
| 1568 |         struct hashmap_base_entry *e;
| 1569 |         void *data;
| 1570 |         unsigned idx;
| 1571 |
| 1572 |         idx = find_first_entry(h);
| 1573 |         if (idx == IDX_NIL)
| 1574 |                 return NULL;
| 1575 |
| 1576 |         e = bucket_at(h, idx);
| 1577 |         data = entry_value(h, e);
| 1578 |         remove_entry(h, idx);
| 1579 |
| 1580 |         return data;
| 1581 | }
| 1582 |
| 1583 | void *internal_hashmap_steal_first_key(HashmapBase *h) {
| 1584 |         struct hashmap_base_entry *e;
| 1585 |         void *key;
| 1586 |         unsigned idx;
| 1587 |
| 1588 |         idx = find_first_entry(h);
| 1589 |         if (idx == IDX_NIL)
| 1590 |                 return NULL;
| 1591 |
| 1592 |         e = bucket_at(h, idx);
| 1593 |         key = (void*) e->key;
| 1594 |         remove_entry(h, idx);
| 1595 |
| 1596 |         return key;
| 1597 | }
| 1598 |
| 1599 | unsigned internal_hashmap_size(HashmapBase *h) {
| 1600 |
| 1601 |         if (!h)
| 1602 |                 return 0;
| 1603 |
| 1604 |         return n_entries(h);
| 1605 | }
| 1606 |
| 1607 | unsigned internal_hashmap_buckets(HashmapBase *h) {
| 1608 |
| 1609 |         if (!h)
| 1610 |                 return 0;
| 1611 |
| 1612 |         return n_buckets(h);
| 1613 | }
| 1614 |
| 1615 | int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
| 1616 |         Iterator i;
| 1617 |         unsigned idx;
| 1618 |
| 1619 |         assert(h);
| 1620 |
| 1621 |         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
| 1622 |                 struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
| 1623 |                 int r;
| 1624 |
| 1625 |                 r = hashmap_put(h, pe->b.key, pe->value);
| 1626 |                 if (r < 0 && r != -EEXIST)
| 1627 |                         return r;
| 1628 |         }
| 1629 |
| 1630 |         return 0;
| 1631 | }
| 1632 |
| 1633 | int set_merge(Set *s, Set *other) {
| 1634 |         Iterator i;
| 1635 |         unsigned idx;
| 1636 |
| 1637 |         assert(s);
| 1638 |
| 1639 |         HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
| 1640 |                 struct set_entry *se = set_bucket_at(other, idx);
| 1641 |                 int r;
| 1642 |
| 1643 |                 r = set_put(s, se->b.key);
| 1644 |                 if (r < 0)
| 1645 |                         return r;
| 1646 |         }
| 1647 |
| 1648 |         return 0;
| 1649 | }
| 1650 |
| 1651 | int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
| 1652 |         int r;
| 1653 |
| 1654 |         assert(h);
| 1655 |
| 1656 |         r = resize_buckets(h, entries_add);
| 1657 |         if (r < 0)
| 1658 |                 return r;
| 1659 |
| 1660 |         return 0;
| 1661 | }
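| | /* Editorial illustration (not part of the original hashmap.c): reserving
| |  * buckets up front so a batch of insertions cannot fail halfway through with
| |  * -ENOMEM. hashmap_reserve() is assumed to be the hashmap.h wrapper around
| |  * internal_hashmap_reserve(); demo_bulk_insert is a hypothetical name. The
| |  * block is kept under "#if 0" so it is clearly not part of the compiled file. */
| | #if 0
| | static int demo_bulk_insert(Hashmap *h, char **keys, void **values, unsigned n) {
| |         unsigned j;
| |         int r;
| |
| |         r = hashmap_reserve(h, n);
| |         if (r < 0)
| |                 return r;                /* -ENOMEM, nothing inserted yet */
| |
| |         for (j = 0; j < n; j++) {
| |                 r = hashmap_put(h, keys[j], values[j]);
| |                 if (r < 0 && r != -EEXIST)
| |                         return r;
| |         }
| |
| |         return 0;
| | }
| | #endif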
| 1662 | ||||
| 1663 | /* | |||
| 1664 | * The same as hashmap_merge(), but every new item from other is moved to h. | |||
| 1665 | * Keys already in h are skipped and stay in other. | |||
| 1666 | * Returns: 0 on success. | |||
| 1667 | * -ENOMEM on alloc failure, in which case no move has been done. | |||
| 1668 | */ | |||
| 1669 | int internal_hashmap_move(HashmapBase *h, HashmapBase *other) { | |||
| 1670 | struct swap_entries swap; | |||
| 1671 | struct hashmap_base_entry *e, *n; | |||
| 1672 | Iterator i; | |||
| 1673 | unsigned idx; | |||
| 1674 | int r; | |||
| 1675 | ||||
| 1676 | assert(h)do { if ((__builtin_expect(!!(!(h)),0))) log_assert_failed_realm (LOG_REALM_SYSTEMD, ("h"), "../src/basic/hashmap.c", 1676, __PRETTY_FUNCTION__ ); } while (0); | |||
| 1677 | ||||
| 1678 | if (!other) | |||
| 1679 | return 0; | |||
| 1680 | ||||
| 1681 | assert(other->type == h->type)do { if ((__builtin_expect(!!(!(other->type == h->type) ),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("other->type == h->type" ), "../src/basic/hashmap.c", 1681, __PRETTY_FUNCTION__); } while (0); | |||
| 1682 | ||||
| 1683 | /* | |||
| 1684 | * This reserves buckets for the worst case, where none of other's | |||
| 1685 | * entries are yet present in h. This is preferable to risking | |||
| 1686 | * an allocation failure in the middle of the moving and having to | |||
| 1687 | * rollback or return a partial result. | |||
| 1688 | */ | |||
| 1689 | r = resize_buckets(h, n_entries(other)); | |||
| 1690 | if (r < 0) | |||
| 1691 | return r; | |||
| 1692 | ||||
| 1693 | HASHMAP_FOREACH_IDX(idx, other, i)for ((i) = ((Iterator) { .idx = ((2147483647 *2U +1U) - 1), . next_key = ((void*)0) }), (idx) = hashmap_iterate_entry((other ), &(i)); (idx != (2147483647 *2U +1U)); (idx) = hashmap_iterate_entry ((other), &(i))) { | |||
| 1694 | unsigned h_hash; | |||
| 1695 | ||||
| 1696 | e = bucket_at(other, idx); | |||
| 1697 | h_hash = bucket_hash(h, e->key)base_bucket_hash(__builtin_choose_expr((__builtin_types_compatible_p (typeof(h), HashmapBase*) || __builtin_types_compatible_p(typeof (h), Hashmap*) || __builtin_types_compatible_p(typeof(h), OrderedHashmap *) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), e->key); | |||
| 1698 | if (bucket_scan(h, h_hash, e->key)base_bucket_scan(__builtin_choose_expr((__builtin_types_compatible_p (typeof(h), HashmapBase*) || __builtin_types_compatible_p(typeof (h), Hashmap*) || __builtin_types_compatible_p(typeof(h), OrderedHashmap *) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), h_hash, e->key) != IDX_NIL(2147483647 *2U +1U)) | |||
| 1699 | continue; | |||
| 1700 | ||||
| 1701 | n = &bucket_at_swap(&swap, IDX_PUT(((2147483647 *2U +1U) - 3) + 0))->p.b; | |||
| 1702 | n->key = e->key; | |||
| 1703 | if (h->type != HASHMAP_TYPE_SET) | |||
| 1704 | ((struct plain_hashmap_entry*) n)->value = | |||
| 1705 | ((struct plain_hashmap_entry*) e)->value; | |||
| 1706 | assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1)do { if ((__builtin_expect(!!(!(hashmap_base_put_boldly(__builtin_choose_expr ((__builtin_types_compatible_p(typeof(h), HashmapBase*) || __builtin_types_compatible_p (typeof(h), Hashmap*) || __builtin_types_compatible_p(typeof( h), OrderedHashmap*) || __builtin_types_compatible_p(typeof(h ), Set*)), (HashmapBase*)(h), (void)0), h_hash, &swap, 0) == 1)),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("hashmap_put_boldly(h, h_hash, &swap, false) == 1" ), "../src/basic/hashmap.c", 1706, __PRETTY_FUNCTION__); } while (0); | |||
| 1707 | ||||
| 1708 | remove_entry(other, idx)base_remove_entry(__builtin_choose_expr((__builtin_types_compatible_p (typeof(other), HashmapBase*) || __builtin_types_compatible_p (typeof(other), Hashmap*) || __builtin_types_compatible_p(typeof (other), OrderedHashmap*) || __builtin_types_compatible_p(typeof (other), Set*)), (HashmapBase*)(other), (void)0), idx); | |||
| 1709 | } | |||
| 1710 | ||||
| 1711 | return 0; | |||
| 1712 | } | |||
| 1713 | ||||
| 1714 | int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) { | |||
| 1715 | struct swap_entries swap; | |||
| 1716 | unsigned h_hash, other_hash, idx; | |||
| 1717 | struct hashmap_base_entry *e, *n; | |||
| 1718 | int r; | |||
| 1719 | ||||
| 1720 | assert(h)do { if ((__builtin_expect(!!(!(h)),0))) log_assert_failed_realm (LOG_REALM_SYSTEMD, ("h"), "../src/basic/hashmap.c", 1720, __PRETTY_FUNCTION__ ); } while (0); | |||
| 1721 | ||||
| 1722 | h_hash = bucket_hash(h, key)base_bucket_hash(__builtin_choose_expr((__builtin_types_compatible_p (typeof(h), HashmapBase*) || __builtin_types_compatible_p(typeof (h), Hashmap*) || __builtin_types_compatible_p(typeof(h), OrderedHashmap *) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), key); | |||
| 1723 | if (bucket_scan(h, h_hash, key)base_bucket_scan(__builtin_choose_expr((__builtin_types_compatible_p (typeof(h), HashmapBase*) || __builtin_types_compatible_p(typeof (h), Hashmap*) || __builtin_types_compatible_p(typeof(h), OrderedHashmap *) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), h_hash, key) != IDX_NIL(2147483647 *2U +1U)) | |||
| 1724 | return -EEXIST17; | |||
| 1725 | ||||
| 1726 | if (!other) | |||
| 1727 | return -ENOENT2; | |||
| 1728 | ||||
| 1729 | assert(other->type == h->type)do { if ((__builtin_expect(!!(!(other->type == h->type) ),0))) log_assert_failed_realm(LOG_REALM_SYSTEMD, ("other->type == h->type" ), "../src/basic/hashmap.c", 1729, __PRETTY_FUNCTION__); } while (0); | |||
| 1730 | ||||
| 1731 | other_hash = bucket_hash(other, key)base_bucket_hash(__builtin_choose_expr((__builtin_types_compatible_p (typeof(other), HashmapBase*) || __builtin_types_compatible_p (typeof(other), Hashmap*) || __builtin_types_compatible_p(typeof (other), OrderedHashmap*) || __builtin_types_compatible_p(typeof (other), Set*)), (HashmapBase*)(other), (void)0), key); | |||
| 1732 | idx = bucket_scan(other, other_hash, key)base_bucket_scan(__builtin_choose_expr((__builtin_types_compatible_p (typeof(other), HashmapBase*) || __builtin_types_compatible_p (typeof(other), Hashmap*) || __builtin_types_compatible_p(typeof (other), OrderedHashmap*) || __builtin_types_compatible_p(typeof (other), Set*)), (HashmapBase*)(other), (void)0), other_hash, key); | |||
| 1733 | if (idx == IDX_NIL(2147483647 *2U +1U)) | |||
| 1734 | return -ENOENT2; | |||
| 1735 | ||||
| 1736 | e = bucket_at(other, idx); | |||
| 1737 | ||||
| 1738 | n = &bucket_at_swap(&swap, IDX_PUT(((2147483647 *2U +1U) - 3) + 0))->p.b; | |||
| 1739 | n->key = e->key; | |||
| 1740 | if (h->type != HASHMAP_TYPE_SET) | |||
| 1741 | ((struct plain_hashmap_entry*) n)->value = | |||
| 1742 | ((struct plain_hashmap_entry*) e)->value; | |||
| 1743 | r = hashmap_put_boldly(h, h_hash, &swap, true)hashmap_base_put_boldly(__builtin_choose_expr((__builtin_types_compatible_p (typeof(h), HashmapBase*) || __builtin_types_compatible_p(typeof (h), Hashmap*) || __builtin_types_compatible_p(typeof(h), OrderedHashmap *) || __builtin_types_compatible_p(typeof(h), Set*)), (HashmapBase *)(h), (void)0), h_hash, &swap, 1); | |||
| 1744 | if (r < 0) | |||
| 1745 | return r; | |||
| 1746 | ||||
| 1747 | remove_entry(other, idx); | |||
| 1748 | return 0; | |||
| 1749 | } | |||
| 1750 | ||||
| 1751 | HashmapBase *internal_hashmap_copy(HashmapBase *h) { | |||
| 1752 | HashmapBase *copy; | |||
| 1753 | int r; | |||
| 1754 | ||||
| 1755 | assert(h); | |||
| 1756 | ||||
| 1757 | copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS); | |||
| 1758 | if (!copy) | |||
| 1759 | return NULL; | |||
| 1760 | ||||
| 1761 | switch (h->type) { | |||
| 1762 | case HASHMAP_TYPE_PLAIN: | |||
| 1763 | case HASHMAP_TYPE_ORDERED: | |||
| 1764 | r = hashmap_merge((Hashmap*)copy, (Hashmap*)h); | |||
| 1765 | break; | |||
| 1766 | case HASHMAP_TYPE_SET: | |||
| 1767 | r = set_merge((Set*)copy, (Set*)h); | |||
| 1768 | break; | |||
| 1769 | default: | |||
| 1770 | assert_not_reached("Unknown hashmap type")do { log_assert_failed_unreachable_realm(LOG_REALM_SYSTEMD, ( "Unknown hashmap type"), "../src/basic/hashmap.c", 1770, __PRETTY_FUNCTION__ ); } while (0); | |||
| 1771 | } | |||
| 1772 | ||||
| 1773 | if (r < 0) { | |||
| 1774 | internal_hashmap_free(copy); | |||
| 1775 | return NULL; | |||
| 1776 | } | |||
| 1777 | ||||
| 1778 | return copy; | |||
| 1779 | } | |||
| 1780 | ||||
| 1781 | char **internal_hashmap_get_strv(HashmapBase *h) { | |||
| 1782 | char **sv; | |||
| 1783 | Iterator i; | |||
| 1784 | unsigned idx, n; | |||
| 1785 | ||||
| 1786 | sv = new(char*, n_entries(h)+1); | |||
| 1787 | if (!sv) | |||
| 1788 | return NULL; | |||
| 1789 | ||||
| 1790 | n = 0; | |||
| 1791 | HASHMAP_FOREACH_IDX(idx, h, i) | |||
| 1792 | sv[n++] = entry_value(h, bucket_at(h, idx)); | |||
| 1793 | sv[n] = NULL; | |||
| 1794 | ||||
| 1795 | return sv; | |||
| 1796 | } | |||
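
internal_hashmap_get_strv() above is normally reached through the public hashmap_get_strv() wrapper declared in hashmap.h; the caller owns only the returned array, while the strings themselves remain owned by the hashmap. A minimal usage sketch, assuming string-valued entries and the hashmap_get_strv()/STRV_FOREACH helpers from hashmap.h and strv.h:

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        #include "hashmap.h"
        #include "strv.h"

        /* Sketch only: print every value of a string-valued Hashmap. */
        static int print_values(Hashmap *h) {
                char **values, **i;

                values = hashmap_get_strv(h);
                if (!values)
                        return -ENOMEM;

                STRV_FOREACH(i, values)
                        puts(*i);

                free(values); /* not strv_free(): the strings still belong to the hashmap */
                return 0;
        }
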
| 1797 | ||||
| 1798 | void *ordered_hashmap_next(OrderedHashmap *h, const void *key) { | |||
| 1799 | struct ordered_hashmap_entry *e; | |||
| 1800 | unsigned hash, idx; | |||
| 1801 | ||||
| 1802 | if (!h) | |||
| 1803 | return NULL; | |||
| 1804 | ||||
| 1805 | hash = bucket_hash(h, key); | |||
| 1806 | idx = bucket_scan(h, hash, key); | |||
| 1807 | if (idx == IDX_NIL) | |||
| 1808 | return NULL; | |||
| 1809 | ||||
| 1810 | e = ordered_bucket_at(h, idx); | |||
| 1811 | if (e->iterate_next == IDX_NIL) | |||
| 1812 | return NULL; | |||
| 1813 | return ordered_bucket_at(h, e->iterate_next)->p.value; | |||
| 1814 | } | |||
| 1815 | ||||
| 1816 | int set_consume(Set *s, void *value) { | |||
| 1817 | int r; | |||
| 1818 | ||||
| 1819 | assert(s); | |||
| 1820 | assert(value); | |||
| 1821 | ||||
| 1822 | r = set_put(s, value); | |||
| 1823 | if (r <= 0) | |||
| 1824 | free(value); | |||
| 1825 | ||||
| 1826 | return r; | |||
| 1827 | } | |||
| 1828 | ||||
| 1829 | int set_put_strdup(Set *s, const char *p) { | |||
| 1830 | char *c; | |||
| 1831 | ||||
| 1832 | assert(s); | |||
| 1833 | assert(p); | |||
| 1834 | ||||
| 1835 | if (set_contains(s, (char*) p)) | |||
| 1836 | return 0; | |||
| 1837 | ||||
| 1838 | c = strdup(p); | |||
| 1839 | if (!c) | |||
| 1840 | return -ENOMEM; | |||
| 1841 | ||||
| 1842 | return set_consume(s, c); | |||
| 1843 | } | |||
| 1844 | ||||
| 1845 | int set_put_strdupv(Set *s, char **l) { | |||
| 1846 | int n = 0, r; | |||
| 1847 | char **i; | |||
| 1848 | ||||
| 1849 | assert(s); | |||
| 1850 | ||||
| 1851 | STRV_FOREACH(i, l) { | |||
| 1852 | r = set_put_strdup(s, *i); | |||
| 1853 | if (r < 0) | |||
| 1854 | return r; | |||
| 1855 | ||||
| 1856 | n += r; | |||
| 1857 | } | |||
| 1858 | ||||
| 1859 | return n; | |||
| 1860 | } | |||
| 1861 | ||||
| 1862 | int set_put_strsplit(Set *s, const char *v, const char *separators, ExtractFlags flags) { | |||
| 1863 | const char *p = v; | |||
| 1864 | int r; | |||
| 1865 | ||||
| 1866 | assert(s); | |||
| 1867 | assert(v); | |||
| 1868 | ||||
| 1869 | for (;;) { | |||
| 1870 | char *word; | |||
| 1871 | ||||
| 1872 | r = extract_first_word(&p, &word, separators, flags); | |||
| 1873 | if (r <= 0) | |||
| 1874 | return r; | |||
| 1875 | ||||
| 1876 | r = set_consume(s, word); | |||
| 1877 | if (r < 0) | |||
| 1878 | return r; | |||
| 1879 | } | |||
| 1880 | } | |||
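
set_put_strsplit() above tokenizes the input with extract_first_word() and hands each newly allocated word to set_consume(), so the set ends up owning copies of the words. A minimal usage sketch, assuming set_new()/set_free_free() from set.h, string_hash_ops from hash-funcs.h and the WHITESPACE separator macro from string-util.h:

        #include <errno.h>

        #include "hash-funcs.h"
        #include "set.h"
        #include "string-util.h"

        /* Sketch only: split a whitespace-separated line into a set of unique words. */
        static int words_to_set(const char *line, Set **ret) {
                Set *s;
                int r;

                s = set_new(&string_hash_ops);
                if (!s)
                        return -ENOMEM;

                r = set_put_strsplit(s, line, WHITESPACE, 0);
                if (r < 0) {
                        set_free_free(s); /* also frees the words already stored */
                        return r;
                }

                *ret = s;
                return 0;
        }
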
| 1881 | ||||
| 1882 | /* expand the cachemem if needed, return true if newly (re)activated. */ | |||
| 1883 | static int cachemem_maintain(CacheMem *mem, unsigned size) { | |||
| 1884 | assert(mem); | |||
| 1885 | ||||
| 1886 | if (!GREEDY_REALLOC(mem->ptr, mem->n_allocated, size)) { | |||
| 1887 | if (size > 0) | |||
| 1888 | return -ENOMEM; | |||
| 1889 | } | |||
| 1890 | ||||
| 1891 | if (!mem->active) { | |||
| 1892 | mem->active = true; | |||
| 1893 | return true; | |||
| 1894 | } | |||
| 1895 | ||||
| 1896 | return false; | |||
| 1897 | } | |||
| 1898 | ||||
| 1899 | int iterated_cache_get(IteratedCache *cache, const void ***res_keys, const void ***res_values, unsigned *res_n_entries) { | |||
| 1900 | bool sync_keys = false, sync_values = false; | |||
| 1901 | unsigned size; | |||
| 1902 | int r; | |||
| 1903 | ||||
| 1904 | assert(cache); | |||
| 1905 | assert(cache->hashmap); | |||
| 1906 | ||||
| 1907 | size = n_entries(cache->hashmap); | |||
| 1908 | ||||
| 1909 | if (res_keys) { | |||
| 1910 | r = cachemem_maintain(&cache->keys, size); | |||
| 1911 | if (r < 0) | |||
| 1912 | return r; | |||
| 1913 | ||||
| 1914 | sync_keys = r; | |||
| 1915 | } else | |||
| 1916 | cache->keys.active = false; | |||
| 1917 | ||||
| 1918 | if (res_values) { | |||
| 1919 | r = cachemem_maintain(&cache->values, size); | |||
| 1920 | if (r < 0) | |||
| 1921 | return r; | |||
| 1922 | ||||
| 1923 | sync_values = r; | |||
| 1924 | } else | |||
| 1925 | cache->values.active = false; | |||
| 1926 | ||||
| 1927 | if (cache->hashmap->dirty) { | |||
| 1928 | if (cache->keys.active) | |||
| 1929 | sync_keys = true; | |||
| 1930 | if (cache->values.active) | |||
| 1931 | sync_values = true; | |||
| 1932 | ||||
| 1933 | cache->hashmap->dirty = false; | |||
| 1934 | } | |||
| 1935 | ||||
| 1936 | if (sync_keys || sync_values) { | |||
| 1937 | unsigned i, idx; | |||
| 1938 | Iterator iter; | |||
| 1939 | ||||
| 1940 | i = 0; | |||
| 1941 | HASHMAP_FOREACH_IDX(idx, cache->hashmap, iter) { | |||
| 1942 | struct hashmap_base_entry *e; | |||
| 1943 | ||||
| 1944 | e = bucket_at(cache->hashmap, idx); | |||
| 1945 | ||||
| 1946 | if (sync_keys) | |||
| 1947 | cache->keys.ptr[i] = e->key; | |||
| 1948 | if (sync_values) | |||
| 1949 | cache->values.ptr[i] = entry_value(cache->hashmap, e); | |||
| 1950 | i++; | |||
| 1951 | } | |||
| 1952 | } | |||
| 1953 | ||||
| 1954 | if (res_keys) | |||
| 1955 | *res_keys = cache->keys.ptr; | |||
| 1956 | if (res_values) | |||
| 1957 | *res_values = cache->values.ptr; | |||
| 1958 | if (res_n_entries) | |||
| 1959 | *res_n_entries = size; | |||
| 1960 | ||||
| 1961 | return 0; | |||
| 1962 | } | |||
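
iterated_cache_get() above exposes the hashmap's current keys and values as flat arrays, re-filling them only when the underlying hashmap is marked dirty or when a cache was just (re)activated by cachemem_maintain(). A minimal usage sketch, assuming the hashmap_iterated_cache_new() constructor declared in hashmap.h and string keys (both assumptions, not shown in this file):

        #include <errno.h>
        #include <stdio.h>

        #include "hashmap.h"

        /* Sketch only: in real use the cache would be kept alive and queried
         * repeatedly; that reuse is what makes it cheaper than a fresh iteration. */
        static int dump_entries(Hashmap *h) {
                IteratedCache *cache;
                const void **keys, **values;
                unsigned n, i;
                int r;

                cache = hashmap_iterated_cache_new(h);
                if (!cache)
                        return -ENOMEM;

                r = iterated_cache_get(cache, &keys, &values, &n);
                if (r < 0) {
                        iterated_cache_free(cache);
                        return r;
                }

                for (i = 0; i < n; i++)
                        printf("%s -> %p\n", (const char*) keys[i], values[i]);

                iterated_cache_free(cache);
                return 0;
        }
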
| 1963 | ||||
| 1964 | IteratedCache *iterated_cache_free(IteratedCache *cache) { | |||
| 1965 | if (cache) { | |||
| 1966 | free(cache->keys.ptr); | |||
| 1967 | free(cache->values.ptr); | |||
| 1968 | free(cache); | |||
| 1969 | } | |||
| 1970 | ||||
| 1971 | return NULL; | |||
| 1972 | } |
| File: | ../src/basic/alloc-util.h |
| 1 | /* SPDX-License-Identifier: LGPL-2.1+ */
| 2 | #pragma once |
| 3 | |
| 4 | #include <alloca.h> |
| 5 | #include <stddef.h> |
| 6 | #include <stdlib.h> |
| 7 | #include <string.h> |
| 8 | |
| 9 | #include "macro.h" |
| 10 | |
| 11 | #define new(t, n) ((t*) malloc_multiply(sizeof(t), (n)))
| 12 | |
| 13 | #define new0(t, n) ((t*) calloc((n), sizeof(t)))
| 14 | |
| 15 | #define newa(t, n) \
| 16 | ({ \ |
| 17 | assert(!size_multiply_overflow(sizeof(t), n)); \
| 18 | (t*) alloca(sizeof(t)*(n)); \
| 19 | }) |
| 20 | |
| 21 | #define newa0(t, n) \
| 22 | ({ \ |
| 23 | assert(!size_multiply_overflow(sizeof(t), n)); \
| 24 | (t*) alloca0(sizeof(t)*(n)); \
| 25 | }) |
| 26 | |
| 27 | #define newdup(t, p, n) ((t*) memdup_multiply(p, sizeof(t), (n)))
| 28 | |
| 29 | #define newdup_suffix0(t, p, n) ((t*) memdup_suffix0_multiply(p, sizeof(t), (n)))
| 30 | |
| 31 | #define malloc0(n) (calloc(1, (n)))
| 32 | |
| 33 | static inline void *mfree(void *memory) { |
| 34 | free(memory); |
| 35 | return NULL;
| 36 | } |
| 37 | |
| 38 | #define free_and_replace(a, b) \
| 39 | ({ \ |
| 40 | free(a); \ |
| 41 | (a) = (b); \ |
| 42 | (b) = NULL; \
| 43 | 0; \ |
| 44 | }) |
| 45 | |
| 46 | void* memdup(const void *p, size_t l) _alloc_(2); |
| 47 | void* memdup_suffix0(const void *p, size_t l) _alloc_(2); |
| 48 | |
| 49 | static inline void freep(void *p) { |
| 50 | free(*(void**) p); |
| 51 | } |
| 52 | |
| 53 | #define _cleanup_free_ _cleanup_(freep)
| 54 | |
| 55 | static inline bool size_multiply_overflow(size_t size, size_t need) {
| 56 | return _unlikely_(need != 0 && size > (SIZE_MAX / need));
| 57 | } |
| 58 | |
| 59 | _malloc_ _alloc_(1, 2) static inline void *malloc_multiply(size_t size, size_t need) {
| 60 | if (size_multiply_overflow(size, need)) |
| 61 | return NULL;
| 62 | |
| 63 | return malloc(size * need); |
| 64 | } |
| 65 | |
| 66 | #if !HAVE_REALLOCARRAY
| 67 | _alloc_(2, 3) static inline void *reallocarray(void *p, size_t need, size_t size) { |
| 68 | if (size_multiply_overflow(size, need)) |
| 69 | return NULL;
| 70 | |
| 71 | return realloc(p, size * need); |
| 72 | } |
| 73 | #endif |
| 74 | |
| 75 | _alloc_(2, 3) static inline void *memdup_multiply(const void *p, size_t size, size_t need) { |
| 76 | if (size_multiply_overflow(size, need)) |
| 77 | return NULL;
| 78 | |
| 79 | return memdup(p, size * need); |
| 80 | } |
| 81 | |
| 82 | _alloc_(2, 3) static inline void *memdup_suffix0_multiply(const void *p, size_t size, size_t need) { |
| 83 | if (size_multiply_overflow(size, need)) |
| 84 | return NULL;
| 85 | |
| 86 | return memdup_suffix0(p, size * need); |
| 87 | } |
| 88 | |
| 89 | void* greedy_realloc(void **p, size_t *allocated, size_t need, size_t size); |
| 90 | void* greedy_realloc0(void **p, size_t *allocated, size_t need, size_t size); |
| 91 | |
| 92 | #define GREEDY_REALLOC(array, allocated, need) \
| 93 | greedy_realloc((void**) &(array), &(allocated), (need), sizeof((array)[0])) |
| 94 | |
| 95 | #define GREEDY_REALLOC0(array, allocated, need) \
| 96 | greedy_realloc0((void**) &(array), &(allocated), (need), sizeof((array)[0])) |
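
GREEDY_REALLOC() grows the array (whose capacity, counted in elements, is tracked in the separate 'allocated' variable) to at least 'need' elements and evaluates to the possibly-moved array pointer, or to NULL if the allocation fails; cachemem_maintain() in hashmap.c above uses exactly this pattern. A minimal sketch of the usual append idiom (the function and parameter names here are illustrative only):

        #include <errno.h>
        #include <stddef.h>

        #include "alloc-util.h"

        /* Sketch only: append one element, growing the buffer geometrically.
         * *n_allocated is the capacity, *n the number of elements in use. */
        static int append_value(int **array, size_t *n_allocated, size_t *n, int value) {
                if (!GREEDY_REALLOC(*array, *n_allocated, *n + 1))
                        return -ENOMEM;

                (*array)[(*n)++] = value;
                return 0;
        }
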
| 97 | |
| 98 | #define alloca0(n) \
| 99 | ({ \ |
| 100 | char *_new_; \ |
| 101 | size_t _len_ = n; \ |
| 102 | _new_ = alloca(_len_); \
| 103 | (void *) memset(_new_, 0, _len_); \ |
| 104 | }) |
| 105 | |
| 106 | /* It's not clear what alignment glibc/gcc alloca() guarantee, hence provide a guaranteed safe version */ |
| 107 | #define alloca_align(size, align) \
| 108 | ({ \ |
| 109 | void *_ptr_; \ |
| 110 | size_t _mask_ = (align) - 1; \ |
| 111 | _ptr_ = alloca((size) + _mask_); \
| 112 | (void*)(((uintptr_t)_ptr_ + _mask_) & ~_mask_); \ |
| 113 | }) |
| 114 | |
| 115 | #define alloca0_align(size, align) \
| 116 | ({ \ |
| 117 | void *_new_; \ |
| 118 | size_t _size_ = (size); \ |
| 119 | _new_ = alloca_align(_size_, (align)); \
| 120 | (void*)memset(_new_, 0, _size_); \ |
| 121 | }) |
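
alloca_align() rounds a stack allocation up so that the returned pointer satisfies the requested power-of-two alignment, and alloca0_align() additionally zeroes the block; like plain alloca(), the memory is only valid until the calling function returns. A minimal sketch, assuming a 64-byte-aligned scratch buffer is wanted (the function name is illustrative):

        #include <stdint.h>
        #include <string.h>

        #include "alloc-util.h"

        /* Sketch only: the buffer lives on this function's stack and must not
         * be returned or stored anywhere that outlives the call. */
        static void use_scratch(const void *data, size_t len) {
                uint8_t *buf;

                buf = alloca0_align(len, 64);
                memcpy(buf, data, len);
                /* ... work on the aligned copy ... */
        }
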
| 122 | |
| 123 | /* Takes inspiration from Rust's Option::take() method: reads and returns a pointer, but at the same time resets it to
| 124 | * NULL. See: https://doc.rust-lang.org/std/option/enum.Option.html#method.take */ |
| 125 | #define TAKE_PTR(ptr) \
| 126 | ({ \ |
| 127 | typeof(ptr) _ptr_ = (ptr); \ |
| 128 | (ptr) = NULL; \
| 129 | _ptr_; \ |
| 130 | }) |
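
TAKE_PTR() is typically paired with the _cleanup_free_ attribute defined above: the local pointer is freed automatically on every early return, and ownership is handed to the caller only on the success path, by resetting the local to NULL first. A minimal sketch (dup_string() is an illustrative name, not part of this header):

        #include <errno.h>
        #include <string.h>

        #include "alloc-util.h"

        static int dup_string(const char *s, char **ret) {
                _cleanup_free_ char *copy = NULL;

                copy = strdup(s);
                if (!copy)
                        return -ENOMEM; /* 'copy' is NULL, nothing to free */

                /* Any early 'return -EINVAL;' added here would free 'copy'
                 * automatically through the cleanup handler. */

                *ret = TAKE_PTR(copy); /* hand over ownership, 'copy' becomes NULL */
                return 0;
        }
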