Line data Source code
1 : /* SPDX-License-Identifier: LGPL-2.1+ */
2 :
3 : #include <errno.h>
4 : #include <stdlib.h>
5 : #include <string.h>
6 : #include <sys/prctl.h>
7 : #include <sys/stat.h>
8 : #include <unistd.h>
9 :
10 : #include "sd-id128.h"
11 : #include "sd-messages.h"
12 :
13 : #include "all-units.h"
14 : #include "alloc-util.h"
15 : #include "bpf-firewall.h"
16 : #include "bus-common-errors.h"
17 : #include "bus-util.h"
18 : #include "cgroup-util.h"
19 : #include "dbus-unit.h"
20 : #include "dbus.h"
21 : #include "dropin.h"
22 : #include "escape.h"
23 : #include "execute.h"
24 : #include "fd-util.h"
25 : #include "fileio-label.h"
26 : #include "fileio.h"
27 : #include "format-util.h"
28 : #include "fs-util.h"
29 : #include "id128-util.h"
30 : #include "io-util.h"
31 : #include "install.h"
32 : #include "load-dropin.h"
33 : #include "load-fragment.h"
34 : #include "log.h"
35 : #include "macro.h"
36 : #include "missing.h"
37 : #include "mkdir.h"
38 : #include "parse-util.h"
39 : #include "path-util.h"
40 : #include "process-util.h"
41 : #include "serialize.h"
42 : #include "set.h"
43 : #include "signal-util.h"
44 : #include "sparse-endian.h"
45 : #include "special.h"
46 : #include "specifier.h"
47 : #include "stat-util.h"
48 : #include "stdio-util.h"
49 : #include "string-table.h"
50 : #include "string-util.h"
51 : #include "strv.h"
52 : #include "terminal-util.h"
53 : #include "tmpfile-util.h"
54 : #include "umask-util.h"
55 : #include "unit-name.h"
56 : #include "unit.h"
57 : #include "user-util.h"
58 : #include "virt.h"
59 :
60 : /* Thresholds for logging at INFO level about resource consumption */
61 : #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
62 : #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
63 : #define MENTIONWORTHY_IP_BYTES (0ULL)
64 :
65 : /* Thresholds for logging at INFO level about resource consumption */
66 : #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
67 : #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
68 : #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
69 :
70 : const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
71 : [UNIT_SERVICE] = &service_vtable,
72 : [UNIT_SOCKET] = &socket_vtable,
73 : [UNIT_TARGET] = &target_vtable,
74 : [UNIT_DEVICE] = &device_vtable,
75 : [UNIT_MOUNT] = &mount_vtable,
76 : [UNIT_AUTOMOUNT] = &automount_vtable,
77 : [UNIT_SWAP] = &swap_vtable,
78 : [UNIT_TIMER] = &timer_vtable,
79 : [UNIT_PATH] = &path_vtable,
80 : [UNIT_SLICE] = &slice_vtable,
81 : [UNIT_SCOPE] = &scope_vtable,
82 : };
83 :
84 : static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
85 :
86 2160 : Unit *unit_new(Manager *m, size_t size) {
87 : Unit *u;
88 :
89 2160 : assert(m);
90 2160 : assert(size >= sizeof(Unit));
91 :
92 2160 : u = malloc0(size);
93 2160 : if (!u)
94 0 : return NULL;
95 :
96 2160 : u->names = set_new(&string_hash_ops);
97 2160 : if (!u->names)
98 0 : return mfree(u);
99 :
100 2160 : u->manager = m;
101 2160 : u->type = _UNIT_TYPE_INVALID;
102 2160 : u->default_dependencies = true;
103 2160 : u->unit_file_state = _UNIT_FILE_STATE_INVALID;
104 2160 : u->unit_file_preset = -1;
105 2160 : u->on_failure_job_mode = JOB_REPLACE;
106 2160 : u->cgroup_control_inotify_wd = -1;
107 2160 : u->cgroup_memory_inotify_wd = -1;
108 2160 : u->job_timeout = USEC_INFINITY;
109 2160 : u->job_running_timeout = USEC_INFINITY;
110 2160 : u->ref_uid = UID_INVALID;
111 2160 : u->ref_gid = GID_INVALID;
112 2160 : u->cpu_usage_last = NSEC_INFINITY;
113 2160 : u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
114 2160 : u->failure_action_exit_status = u->success_action_exit_status = -1;
115 :
116 2160 : u->ip_accounting_ingress_map_fd = -1;
117 2160 : u->ip_accounting_egress_map_fd = -1;
118 2160 : u->ipv4_allow_map_fd = -1;
119 2160 : u->ipv6_allow_map_fd = -1;
120 2160 : u->ipv4_deny_map_fd = -1;
121 2160 : u->ipv6_deny_map_fd = -1;
122 :
123 2160 : u->last_section_private = -1;
124 :
125 2160 : RATELIMIT_INIT(u->start_limit, m->default_start_limit_interval, m->default_start_limit_burst);
126 2160 : RATELIMIT_INIT(u->auto_stop_ratelimit, 10 * USEC_PER_SEC, 16);
127 :
128 10800 : for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
129 8640 : u->io_accounting_last[i] = UINT64_MAX;
130 :
131 2160 : return u;
132 : }
133 :
134 1705 : int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
135 1705 : _cleanup_(unit_freep) Unit *u = NULL;
136 : int r;
137 :
138 1705 : u = unit_new(m, size);
139 1705 : if (!u)
140 0 : return -ENOMEM;
141 :
142 1705 : r = unit_add_name(u, name);
143 1705 : if (r < 0)
144 0 : return r;
145 :
146 1705 : *ret = TAKE_PTR(u);
147 :
148 1705 : return r;
149 : }
150 :
151 1092 : bool unit_has_name(const Unit *u, const char *name) {
152 1092 : assert(u);
153 1092 : assert(name);
154 :
155 1092 : return set_contains(u->names, (char*) name);
156 : }
157 :
158 2158 : static void unit_init(Unit *u) {
159 : CGroupContext *cc;
160 : ExecContext *ec;
161 : KillContext *kc;
162 :
163 2158 : assert(u);
164 2158 : assert(u->manager);
165 2158 : assert(u->type >= 0);
166 :
167 2158 : cc = unit_get_cgroup_context(u);
168 2158 : if (cc) {
169 589 : cgroup_context_init(cc);
170 :
171 : /* Copy in the manager defaults into the cgroup
172 : * context, _before_ the rest of the settings have
173 : * been initialized */
174 :
175 589 : cc->cpu_accounting = u->manager->default_cpu_accounting;
176 589 : cc->io_accounting = u->manager->default_io_accounting;
177 589 : cc->blockio_accounting = u->manager->default_blockio_accounting;
178 589 : cc->memory_accounting = u->manager->default_memory_accounting;
179 589 : cc->tasks_accounting = u->manager->default_tasks_accounting;
180 589 : cc->ip_accounting = u->manager->default_ip_accounting;
181 :
182 589 : if (u->type != UNIT_SLICE)
183 570 : cc->tasks_max = u->manager->default_tasks_max;
184 : }
185 :
186 2158 : ec = unit_get_exec_context(u);
187 2158 : if (ec) {
188 559 : exec_context_init(ec);
189 :
190 559 : ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
191 559 : EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
192 : }
193 :
194 2158 : kc = unit_get_kill_context(u);
195 2158 : if (kc)
196 570 : kill_context_init(kc);
197 :
198 2158 : if (UNIT_VTABLE(u)->init)
199 2048 : UNIT_VTABLE(u)->init(u);
200 2158 : }
201 :
202 2196 : int unit_add_name(Unit *u, const char *text) {
203 2196 : _cleanup_free_ char *s = NULL, *i = NULL;
204 : UnitType t;
205 : int r;
206 :
207 2196 : assert(u);
208 2196 : assert(text);
209 :
210 2196 : if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
211 :
212 0 : if (!u->instance)
213 0 : return -EINVAL;
214 :
215 0 : r = unit_name_replace_instance(text, u->instance, &s);
216 0 : if (r < 0)
217 0 : return r;
218 : } else {
219 2196 : s = strdup(text);
220 2196 : if (!s)
221 0 : return -ENOMEM;
222 : }
223 :
224 2196 : if (set_contains(u->names, s))
225 2 : return 0;
226 2194 : if (hashmap_contains(u->manager->units, s))
227 0 : return -EEXIST;
228 :
229 2194 : if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
230 0 : return -EINVAL;
231 :
232 2194 : t = unit_name_to_type(s);
233 2194 : if (t < 0)
234 0 : return -EINVAL;
235 :
236 2194 : if (u->type != _UNIT_TYPE_INVALID && t != u->type)
237 0 : return -EINVAL;
238 :
239 2194 : r = unit_name_to_instance(s, &i);
240 2194 : if (r < 0)
241 0 : return r;
242 :
243 2194 : if (i && !unit_type_may_template(t))
244 0 : return -EINVAL;
245 :
246 : /* Ensure that this unit is either instanced or not instanced,
247 : * but not both. Note that we do allow names with different
248 : * instance names however! */
249 2194 : if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
250 0 : return -EINVAL;
251 :
252 2194 : if (!unit_type_may_alias(t) && !set_isempty(u->names))
253 0 : return -EEXIST;
254 :
255 2194 : if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
256 0 : return -E2BIG;
257 :
258 2194 : r = set_put(u->names, s);
259 2194 : if (r < 0)
260 0 : return r;
261 2194 : assert(r > 0);
262 :
263 2194 : r = hashmap_put(u->manager->units, s, u);
264 2194 : if (r < 0) {
265 0 : (void) set_remove(u->names, s);
266 0 : return r;
267 : }
268 :
269 2194 : if (u->type == _UNIT_TYPE_INVALID) {
270 2158 : u->type = t;
271 2158 : u->id = s;
272 2158 : u->instance = TAKE_PTR(i);
273 :
274 2158 : LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
275 :
276 2158 : unit_init(u);
277 : }
278 :
279 2194 : s = NULL;
280 :
281 2194 : unit_add_to_dbus_queue(u);
282 2194 : return 0;
283 : }
284 :
285 2144 : int unit_choose_id(Unit *u, const char *name) {
286 2144 : _cleanup_free_ char *t = NULL;
287 : char *s, *i;
288 : int r;
289 :
290 2144 : assert(u);
291 2144 : assert(name);
292 :
293 2144 : if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
294 :
295 0 : if (!u->instance)
296 0 : return -EINVAL;
297 :
298 0 : r = unit_name_replace_instance(name, u->instance, &t);
299 0 : if (r < 0)
300 0 : return r;
301 :
302 0 : name = t;
303 : }
304 :
305 : /* Selects one of the names of this unit as the id */
306 2144 : s = set_get(u->names, (char*) name);
307 2144 : if (!s)
308 0 : return -ENOENT;
309 :
310 : /* Determine the new instance from the new id */
311 2144 : r = unit_name_to_instance(s, &i);
312 2144 : if (r < 0)
313 0 : return r;
314 :
315 2144 : u->id = s;
316 :
317 2144 : free(u->instance);
318 2144 : u->instance = i;
319 :
320 2144 : unit_add_to_dbus_queue(u);
321 :
322 2144 : return 0;
323 : }
324 :
325 1738 : int unit_set_description(Unit *u, const char *description) {
326 : int r;
327 :
328 1738 : assert(u);
329 :
330 1738 : r = free_and_strdup(&u->description, empty_to_null(description));
331 1738 : if (r < 0)
332 0 : return r;
333 1738 : if (r > 0)
334 1683 : unit_add_to_dbus_queue(u);
335 :
336 1738 : return 0;
337 : }
338 :
339 5243 : bool unit_may_gc(Unit *u) {
340 : UnitActiveState state;
341 : int r;
342 :
343 5243 : assert(u);
344 :
345 : /* Checks whether the unit is ready to be unloaded for garbage collection.
346 : * Returns true when the unit may be collected, and false if there's some
347 : * reason to keep it loaded.
348 : *
349 : * References from other units are *not* checked here. Instead, this is done
350 : * in unit_gc_sweep(), but using markers to properly collect dependency loops.
351 : */
352 :
353 5243 : if (u->job)
354 11 : return false;
355 :
356 5232 : if (u->nop_job)
357 0 : return false;
358 :
359 5232 : state = unit_active_state(u);
360 :
361 : /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
362 5232 : if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
363 2401 : UNIT_VTABLE(u)->release_resources)
364 88 : UNIT_VTABLE(u)->release_resources(u);
365 :
366 5232 : if (u->perpetual)
367 1174 : return false;
368 :
369 4058 : if (sd_bus_track_count(u->bus_track) > 0)
370 0 : return false;
371 :
372 : /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
373 4058 : switch (u->collect_mode) {
374 :
375 4058 : case COLLECT_INACTIVE:
376 4058 : if (state != UNIT_INACTIVE)
377 1690 : return false;
378 :
379 2368 : break;
380 :
381 0 : case COLLECT_INACTIVE_OR_FAILED:
382 0 : if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
383 0 : return false;
384 :
385 0 : break;
386 :
387 0 : default:
388 0 : assert_not_reached("Unknown garbage collection mode");
389 : }
390 :
391 2368 : if (u->cgroup_path) {
392 : /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
393 : * around. Units with active processes should never be collected. */
394 :
395 0 : r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
396 0 : if (r < 0)
397 0 : log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
398 0 : if (r <= 0)
399 0 : return false;
400 : }
401 :
402 2368 : if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
403 220 : return false;
404 :
405 2148 : return true;
406 : }
407 :
408 2155 : void unit_add_to_load_queue(Unit *u) {
409 2155 : assert(u);
410 2155 : assert(u->type != _UNIT_TYPE_INVALID);
411 :
412 2155 : if (u->load_state != UNIT_STUB || u->in_load_queue)
413 0 : return;
414 :
415 2155 : LIST_PREPEND(load_queue, u->manager->load_queue, u);
416 2155 : u->in_load_queue = true;
417 : }
418 :
419 0 : void unit_add_to_cleanup_queue(Unit *u) {
420 0 : assert(u);
421 :
422 0 : if (u->in_cleanup_queue)
423 0 : return;
424 :
425 0 : LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
426 0 : u->in_cleanup_queue = true;
427 : }
428 :
429 6924 : void unit_add_to_gc_queue(Unit *u) {
430 6924 : assert(u);
431 :
432 6924 : if (u->in_gc_queue || u->in_cleanup_queue)
433 2871 : return;
434 :
435 4053 : if (!unit_may_gc(u))
436 2151 : return;
437 :
438 1902 : LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
439 1902 : u->in_gc_queue = true;
440 : }
441 :
442 15269 : void unit_add_to_dbus_queue(Unit *u) {
443 15269 : assert(u);
444 15269 : assert(u->type != _UNIT_TYPE_INVALID);
445 :
446 15269 : if (u->load_state == UNIT_STUB || u->in_dbus_queue)
447 7898 : return;
448 :
449 : /* Shortcut things if nobody cares */
450 14742 : if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
451 14742 : sd_bus_track_count(u->bus_track) <= 0 &&
452 7371 : set_isempty(u->manager->private_buses)) {
453 7371 : u->sent_dbus_new_signal = true;
454 7371 : return;
455 : }
456 :
457 0 : LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
458 0 : u->in_dbus_queue = true;
459 : }
460 :
461 1749 : void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
462 1749 : assert(u);
463 :
464 1749 : if (u->in_stop_when_unneeded_queue)
465 0 : return;
466 :
467 1749 : if (!u->stop_when_unneeded)
468 1749 : return;
469 :
470 0 : if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
471 0 : return;
472 :
473 0 : LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
474 0 : u->in_stop_when_unneeded_queue = true;
475 : }
476 :
477 47520 : static void bidi_set_free(Unit *u, Hashmap *h) {
478 : Unit *other;
479 : Iterator i;
480 : void *v;
481 :
482 47520 : assert(u);
483 :
484 : /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
485 :
486 49754 : HASHMAP_FOREACH_KEY(v, other, h, i) {
487 : UnitDependency d;
488 :
489 51382 : for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
490 49148 : hashmap_remove(other->dependencies[d], u);
491 :
492 2234 : unit_add_to_gc_queue(other);
493 : }
494 :
495 47520 : hashmap_free(h);
496 47520 : }
497 :
498 2160 : static void unit_remove_transient(Unit *u) {
499 : char **i;
500 :
501 2160 : assert(u);
502 :
503 2160 : if (!u->transient)
504 2149 : return;
505 :
506 11 : if (u->fragment_path)
507 0 : (void) unlink(u->fragment_path);
508 :
509 11 : STRV_FOREACH(i, u->dropin_paths) {
510 0 : _cleanup_free_ char *p = NULL, *pp = NULL;
511 :
512 0 : p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
513 0 : if (!p)
514 0 : continue;
515 :
516 0 : pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
517 0 : if (!pp)
518 0 : continue;
519 :
520 : /* Only drop transient drop-ins */
521 0 : if (!path_equal(u->manager->lookup_paths.transient, pp))
522 0 : continue;
523 :
524 0 : (void) unlink(*i);
525 0 : (void) rmdir(p);
526 : }
527 : }
528 :
529 2160 : static void unit_free_requires_mounts_for(Unit *u) {
530 2160 : assert(u);
531 :
532 249 : for (;;) {
533 2409 : _cleanup_free_ char *path;
534 :
535 2409 : path = hashmap_steal_first_key(u->requires_mounts_for);
536 2409 : if (!path)
537 2160 : break;
538 249 : else {
539 249 : char s[strlen(path) + 1];
540 :
541 1051 : PATH_FOREACH_PREFIX_MORE(s, path) {
542 : char *y;
543 : Set *x;
544 :
545 802 : x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
546 802 : if (!x)
547 7 : continue;
548 :
549 795 : (void) set_remove(x, u);
550 :
551 795 : if (set_isempty(x)) {
552 322 : (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
553 322 : free(y);
554 322 : set_free(x);
555 : }
556 : }
557 : }
558 : }
559 :
560 2160 : u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
561 2160 : }
562 :
563 2160 : static void unit_done(Unit *u) {
564 : ExecContext *ec;
565 : CGroupContext *cc;
566 :
567 2160 : assert(u);
568 :
569 2160 : if (u->type < 0)
570 2 : return;
571 :
572 2158 : if (UNIT_VTABLE(u)->done)
573 2029 : UNIT_VTABLE(u)->done(u);
574 :
575 2158 : ec = unit_get_exec_context(u);
576 2158 : if (ec)
577 559 : exec_context_done(ec);
578 :
579 2158 : cc = unit_get_cgroup_context(u);
580 2158 : if (cc)
581 589 : cgroup_context_done(cc);
582 : }
583 :
584 2160 : void unit_free(Unit *u) {
585 : UnitDependency d;
586 : Iterator i;
587 : char *t;
588 :
589 2160 : if (!u)
590 0 : return;
591 :
592 2160 : if (UNIT_ISSET(u->slice)) {
593 : /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
594 183 : unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
595 :
596 : /* And make sure the parent is realized again, updating cgroup memberships */
597 183 : unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
598 : }
599 :
600 2160 : u->transient_file = safe_fclose(u->transient_file);
601 :
602 2160 : if (!MANAGER_IS_RELOADING(u->manager))
603 2160 : unit_remove_transient(u);
604 :
605 2160 : bus_unit_send_removed_signal(u);
606 :
607 2160 : unit_done(u);
608 :
609 2160 : unit_dequeue_rewatch_pids(u);
610 :
611 2160 : sd_bus_slot_unref(u->match_bus_slot);
612 2160 : sd_bus_track_unref(u->bus_track);
613 2160 : u->deserialized_refs = strv_free(u->deserialized_refs);
614 :
615 2160 : unit_free_requires_mounts_for(u);
616 :
617 4354 : SET_FOREACH(t, u->names, i)
618 2194 : hashmap_remove_value(u->manager->units, t, u);
619 :
620 2160 : if (!sd_id128_is_null(u->invocation_id))
621 1483 : hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
622 :
623 2160 : if (u->job) {
624 6 : Job *j = u->job;
625 6 : job_uninstall(j);
626 6 : job_free(j);
627 : }
628 :
629 2160 : if (u->nop_job) {
630 0 : Job *j = u->nop_job;
631 0 : job_uninstall(j);
632 0 : job_free(j);
633 : }
634 :
635 49680 : for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
636 47520 : bidi_set_free(u, u->dependencies[d]);
637 :
638 2160 : if (u->on_console)
639 0 : manager_unref_console(u->manager);
640 :
641 2160 : unit_release_cgroup(u);
642 :
643 2160 : if (!MANAGER_IS_RELOADING(u->manager))
644 2160 : unit_unlink_state_files(u);
645 :
646 2160 : unit_unref_uid_gid(u, false);
647 :
648 2160 : (void) manager_update_failed_units(u->manager, u, false);
649 2160 : set_remove(u->manager->startup_units, u);
650 :
651 2160 : unit_unwatch_all_pids(u);
652 :
653 2160 : unit_ref_unset(&u->slice);
654 2259 : while (u->refs_by_target)
655 99 : unit_ref_unset(u->refs_by_target);
656 :
657 2160 : if (u->type != _UNIT_TYPE_INVALID)
658 2158 : LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
659 :
660 2160 : if (u->in_load_queue)
661 0 : LIST_REMOVE(load_queue, u->manager->load_queue, u);
662 :
663 2160 : if (u->in_dbus_queue)
664 0 : LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
665 :
666 2160 : if (u->in_gc_queue)
667 1902 : LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
668 :
669 2160 : if (u->in_cgroup_realize_queue)
670 16 : LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
671 :
672 2160 : if (u->in_cgroup_empty_queue)
673 0 : LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
674 :
675 2160 : if (u->in_cleanup_queue)
676 0 : LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
677 :
678 2160 : if (u->in_target_deps_queue)
679 0 : LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
680 :
681 2160 : if (u->in_stop_when_unneeded_queue)
682 0 : LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
683 :
684 2160 : safe_close(u->ip_accounting_ingress_map_fd);
685 2160 : safe_close(u->ip_accounting_egress_map_fd);
686 :
687 2160 : safe_close(u->ipv4_allow_map_fd);
688 2160 : safe_close(u->ipv6_allow_map_fd);
689 2160 : safe_close(u->ipv4_deny_map_fd);
690 2160 : safe_close(u->ipv6_deny_map_fd);
691 :
692 2160 : bpf_program_unref(u->ip_bpf_ingress);
693 2160 : bpf_program_unref(u->ip_bpf_ingress_installed);
694 2160 : bpf_program_unref(u->ip_bpf_egress);
695 2160 : bpf_program_unref(u->ip_bpf_egress_installed);
696 :
697 2160 : set_free(u->ip_bpf_custom_ingress);
698 2160 : set_free(u->ip_bpf_custom_egress);
699 2160 : set_free(u->ip_bpf_custom_ingress_installed);
700 2160 : set_free(u->ip_bpf_custom_egress_installed);
701 :
702 2160 : bpf_program_unref(u->bpf_device_control_installed);
703 :
704 2160 : condition_free_list(u->conditions);
705 2160 : condition_free_list(u->asserts);
706 :
707 2160 : free(u->description);
708 2160 : strv_free(u->documentation);
709 2160 : free(u->fragment_path);
710 2160 : free(u->source_path);
711 2160 : strv_free(u->dropin_paths);
712 2160 : free(u->instance);
713 :
714 2160 : free(u->job_timeout_reboot_arg);
715 :
716 2160 : set_free_free(u->names);
717 :
718 2160 : free(u->reboot_arg);
719 :
720 2160 : free(u);
721 : }
722 :
723 12602 : UnitActiveState unit_active_state(Unit *u) {
724 12602 : assert(u);
725 :
726 12602 : if (u->load_state == UNIT_MERGED)
727 0 : return unit_active_state(unit_follow_merge(u));
728 :
729 : /* After a reload it might happen that a unit is not correctly
730 : * loaded but still has a process around. That's why we won't
731 : * shortcut failed loading to UNIT_INACTIVE_FAILED. */
732 :
733 12602 : return UNIT_VTABLE(u)->active_state(u);
734 : }
735 :
736 0 : const char* unit_sub_state_to_string(Unit *u) {
737 0 : assert(u);
738 :
739 0 : return UNIT_VTABLE(u)->sub_state_to_string(u);
740 : }
741 :
742 0 : static int set_complete_move(Set **s, Set **other) {
743 0 : assert(s);
744 0 : assert(other);
745 :
746 0 : if (!other)
747 0 : return 0;
748 :
749 0 : if (*s)
750 0 : return set_move(*s, *other);
751 : else
752 0 : *s = TAKE_PTR(*other);
753 :
754 0 : return 0;
755 : }
756 :
757 0 : static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
758 0 : assert(s);
759 0 : assert(other);
760 :
761 0 : if (!*other)
762 0 : return 0;
763 :
764 0 : if (*s)
765 0 : return hashmap_move(*s, *other);
766 : else
767 0 : *s = TAKE_PTR(*other);
768 :
769 0 : return 0;
770 : }
771 :
772 0 : static int merge_names(Unit *u, Unit *other) {
773 : char *t;
774 : Iterator i;
775 : int r;
776 :
777 0 : assert(u);
778 0 : assert(other);
779 :
780 0 : r = set_complete_move(&u->names, &other->names);
781 0 : if (r < 0)
782 0 : return r;
783 :
784 0 : set_free_free(other->names);
785 0 : other->names = NULL;
786 0 : other->id = NULL;
787 :
788 0 : SET_FOREACH(t, u->names, i)
789 0 : assert_se(hashmap_replace(u->manager->units, t, u) == 0);
790 :
791 0 : return 0;
792 : }
793 :
794 0 : static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
795 : unsigned n_reserve;
796 :
797 0 : assert(u);
798 0 : assert(other);
799 0 : assert(d < _UNIT_DEPENDENCY_MAX);
800 :
801 : /*
802 : * If u does not have this dependency set allocated, there is no need
803 : * to reserve anything. In that case other's set will be transferred
804 : * as a whole to u by complete_move().
805 : */
806 0 : if (!u->dependencies[d])
807 0 : return 0;
808 :
809 : /* merge_dependencies() will skip a u-on-u dependency */
810 0 : n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
811 :
812 0 : return hashmap_reserve(u->dependencies[d], n_reserve);
813 : }
814 :
815 0 : static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
816 : Iterator i;
817 : Unit *back;
818 : void *v;
819 : int r;
820 :
821 : /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
822 :
823 0 : assert(u);
824 0 : assert(other);
825 0 : assert(d < _UNIT_DEPENDENCY_MAX);
826 :
827 : /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
828 0 : HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
829 : UnitDependency k;
830 :
831 : /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
832 : * pointers back, and let's fix them up, to instead point to 'u'. */
833 :
834 0 : for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
835 0 : if (back == u) {
836 : /* Do not add dependencies between u and itself. */
837 0 : if (hashmap_remove(back->dependencies[k], other))
838 0 : maybe_warn_about_dependency(u, other_id, k);
839 : } else {
840 : UnitDependencyInfo di_u, di_other, di_merged;
841 :
842 : /* Let's drop this dependency between "back" and "other", and let's create it between
843 : * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
844 : * and any such dependency which might already exist */
845 :
846 0 : di_other.data = hashmap_get(back->dependencies[k], other);
847 0 : if (!di_other.data)
848 0 : continue; /* dependency isn't set, let's try the next one */
849 :
850 0 : di_u.data = hashmap_get(back->dependencies[k], u);
851 :
852 0 : di_merged = (UnitDependencyInfo) {
853 0 : .origin_mask = di_u.origin_mask | di_other.origin_mask,
854 0 : .destination_mask = di_u.destination_mask | di_other.destination_mask,
855 : };
856 :
857 0 : r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
858 0 : if (r < 0)
859 0 : log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
860 0 : assert(r >= 0);
861 :
862 : /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
863 : }
864 : }
865 :
866 : }
867 :
868 : /* Also do not move dependencies on u to itself */
869 0 : back = hashmap_remove(other->dependencies[d], u);
870 0 : if (back)
871 0 : maybe_warn_about_dependency(u, other_id, d);
872 :
873 : /* The move cannot fail. The caller must have performed a reservation. */
874 0 : assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
875 :
876 0 : other->dependencies[d] = hashmap_free(other->dependencies[d]);
877 0 : }
878 :
879 2144 : int unit_merge(Unit *u, Unit *other) {
880 : UnitDependency d;
881 2144 : const char *other_id = NULL;
882 : int r;
883 :
884 2144 : assert(u);
885 2144 : assert(other);
886 2144 : assert(u->manager == other->manager);
887 2144 : assert(u->type != _UNIT_TYPE_INVALID);
888 :
889 2144 : other = unit_follow_merge(other);
890 :
891 2144 : if (other == u)
892 2144 : return 0;
893 :
894 0 : if (u->type != other->type)
895 0 : return -EINVAL;
896 :
897 0 : if (!u->instance != !other->instance)
898 0 : return -EINVAL;
899 :
900 0 : if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
901 0 : return -EEXIST;
902 :
903 0 : if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
904 0 : return -EEXIST;
905 :
906 0 : if (other->job)
907 0 : return -EEXIST;
908 :
909 0 : if (other->nop_job)
910 0 : return -EEXIST;
911 :
912 0 : if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
913 0 : return -EEXIST;
914 :
915 0 : if (other->id)
916 0 : other_id = strdupa(other->id);
917 :
918 : /* Make reservations to ensure merge_dependencies() won't fail */
919 0 : for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
920 0 : r = reserve_dependencies(u, other, d);
921 : /*
922 : * We don't rollback reservations if we fail. We don't have
923 : * a way to undo reservations. A reservation is not a leak.
924 : */
925 0 : if (r < 0)
926 0 : return r;
927 : }
928 :
929 : /* Merge names */
930 0 : r = merge_names(u, other);
931 0 : if (r < 0)
932 0 : return r;
933 :
934 : /* Redirect all references */
935 0 : while (other->refs_by_target)
936 0 : unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
937 :
938 : /* Merge dependencies */
939 0 : for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
940 0 : merge_dependencies(u, other, other_id, d);
941 :
942 0 : other->load_state = UNIT_MERGED;
943 0 : other->merged_into = u;
944 :
945 : /* If there is still some data attached to the other node, we
946 : * don't need it anymore, and can free it. */
947 0 : if (other->load_state != UNIT_STUB)
948 0 : if (UNIT_VTABLE(other)->done)
949 0 : UNIT_VTABLE(other)->done(other);
950 :
951 0 : unit_add_to_dbus_queue(u);
952 0 : unit_add_to_cleanup_queue(other);
953 :
954 0 : return 0;
955 : }
956 :
957 2180 : int unit_merge_by_name(Unit *u, const char *name) {
958 2180 : _cleanup_free_ char *s = NULL;
959 : Unit *other;
960 : int r;
961 :
962 : /* Either add name to u, or if a unit with name already exists, merge it with u.
963 : * If name is a template, do the same for name@instance, where instance is u's instance. */
964 :
965 2180 : assert(u);
966 2180 : assert(name);
967 :
968 2180 : if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
969 0 : if (!u->instance)
970 0 : return -EINVAL;
971 :
972 0 : r = unit_name_replace_instance(name, u->instance, &s);
973 0 : if (r < 0)
974 0 : return r;
975 :
976 0 : name = s;
977 : }
978 :
979 2180 : other = manager_get_unit(u->manager, name);
980 2180 : if (other)
981 2144 : return unit_merge(u, other);
982 :
983 36 : return unit_add_name(u, name);
984 : }
985 :
986 8745 : Unit* unit_follow_merge(Unit *u) {
987 8745 : assert(u);
988 :
989 8745 : while (u->load_state == UNIT_MERGED)
990 0 : assert_se(u = u->merged_into);
991 :
992 8745 : return u;
993 : }
994 :
995 275 : int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
996 : ExecDirectoryType dt;
997 : char **dp;
998 : int r;
999 :
1000 275 : assert(u);
1001 275 : assert(c);
1002 :
1003 275 : if (c->working_directory && !c->working_directory_missing_ok) {
1004 0 : r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
1005 0 : if (r < 0)
1006 0 : return r;
1007 : }
1008 :
1009 275 : if (c->root_directory) {
1010 0 : r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
1011 0 : if (r < 0)
1012 0 : return r;
1013 : }
1014 :
1015 275 : if (c->root_image) {
1016 0 : r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1017 0 : if (r < 0)
1018 0 : return r;
1019 : }
1020 :
1021 1650 : for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1022 1375 : if (!u->manager->prefix[dt])
1023 0 : continue;
1024 :
1025 1375 : STRV_FOREACH(dp, c->directories[dt].paths) {
1026 0 : _cleanup_free_ char *p;
1027 :
1028 0 : p = path_join(u->manager->prefix[dt], *dp);
1029 0 : if (!p)
1030 0 : return -ENOMEM;
1031 :
1032 0 : r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1033 0 : if (r < 0)
1034 0 : return r;
1035 : }
1036 : }
1037 :
1038 275 : if (!MANAGER_IS_SYSTEM(u->manager))
1039 275 : return 0;
1040 :
1041 0 : if (c->private_tmp) {
1042 : const char *p;
1043 :
1044 0 : FOREACH_STRING(p, "/tmp", "/var/tmp") {
1045 0 : r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1046 0 : if (r < 0)
1047 0 : return r;
1048 : }
1049 :
1050 0 : r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1051 0 : if (r < 0)
1052 0 : return r;
1053 : }
1054 :
1055 0 : if (!IN_SET(c->std_output,
1056 : EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1057 : EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1058 0 : EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1059 0 : !IN_SET(c->std_error,
1060 : EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1061 : EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1062 : EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE))
1063 0 : return 0;
1064 :
1065 : /* If syslog or kernel logging is requested, make sure our own
1066 : * logging daemon is run first. */
1067 :
1068 0 : r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1069 0 : if (r < 0)
1070 0 : return r;
1071 :
1072 0 : return 0;
1073 : }
1074 :
1075 1216 : const char *unit_description(Unit *u) {
1076 1216 : assert(u);
1077 :
1078 1216 : if (u->description)
1079 1030 : return u->description;
1080 :
1081 186 : return strna(u->id);
1082 : }
1083 :
1084 26 : const char *unit_status_string(Unit *u) {
1085 26 : assert(u);
1086 :
1087 26 : if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1088 0 : return u->id;
1089 :
1090 26 : return unit_description(u);
1091 : }
1092 :
1093 6256 : static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1094 : const struct {
1095 : UnitDependencyMask mask;
1096 : const char *name;
1097 6256 : } table[] = {
1098 : { UNIT_DEPENDENCY_FILE, "file" },
1099 : { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1100 : { UNIT_DEPENDENCY_DEFAULT, "default" },
1101 : { UNIT_DEPENDENCY_UDEV, "udev" },
1102 : { UNIT_DEPENDENCY_PATH, "path" },
1103 : { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1104 : { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1105 : { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1106 : };
1107 : size_t i;
1108 :
1109 6256 : assert(f);
1110 6256 : assert(kind);
1111 6256 : assert(space);
1112 :
1113 13668 : for (i = 0; i < ELEMENTSOF(table); i++) {
1114 :
1115 13668 : if (mask == 0)
1116 6256 : break;
1117 :
1118 7412 : if (FLAGS_SET(mask, table[i].mask)) {
1119 3200 : if (*space)
1120 72 : fputc(' ', f);
1121 : else
1122 3128 : *space = true;
1123 :
1124 3200 : fputs(kind, f);
1125 3200 : fputs("-", f);
1126 3200 : fputs(table[i].name, f);
1127 :
1128 3200 : mask &= ~table[i].mask;
1129 : }
1130 : }
1131 :
1132 6256 : assert(mask == 0);
1133 6256 : }
1134 :
1135 1190 : void unit_dump(Unit *u, FILE *f, const char *prefix) {
1136 : char *t, **j;
1137 : UnitDependency d;
1138 : Iterator i;
1139 : const char *prefix2;
1140 : char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
1141 : Unit *following;
1142 1190 : _cleanup_set_free_ Set *following_set = NULL;
1143 : const char *n;
1144 : CGroupMask m;
1145 : int r;
1146 :
1147 1190 : assert(u);
1148 1190 : assert(u->type >= 0);
1149 :
1150 1190 : prefix = strempty(prefix);
1151 5950 : prefix2 = strjoina(prefix, "\t");
1152 :
1153 1190 : fprintf(f,
1154 : "%s-> Unit %s:\n",
1155 : prefix, u->id);
1156 :
1157 2380 : SET_FOREACH(t, u->names, i)
1158 1190 : if (!streq(t, u->id))
1159 0 : fprintf(f, "%s\tAlias: %s\n", prefix, t);
1160 :
1161 10710 : fprintf(f,
1162 : "%s\tDescription: %s\n"
1163 : "%s\tInstance: %s\n"
1164 : "%s\tUnit Load State: %s\n"
1165 : "%s\tUnit Active State: %s\n"
1166 : "%s\tState Change Timestamp: %s\n"
1167 : "%s\tInactive Exit Timestamp: %s\n"
1168 : "%s\tActive Enter Timestamp: %s\n"
1169 : "%s\tActive Exit Timestamp: %s\n"
1170 : "%s\tInactive Enter Timestamp: %s\n"
1171 : "%s\tMay GC: %s\n"
1172 : "%s\tNeed Daemon Reload: %s\n"
1173 : "%s\tTransient: %s\n"
1174 : "%s\tPerpetual: %s\n"
1175 : "%s\tGarbage Collection Mode: %s\n"
1176 : "%s\tSlice: %s\n"
1177 : "%s\tCGroup: %s\n"
1178 : "%s\tCGroup realized: %s\n",
1179 : prefix, unit_description(u),
1180 1190 : prefix, strna(u->instance),
1181 : prefix, unit_load_state_to_string(u->load_state),
1182 : prefix, unit_active_state_to_string(unit_active_state(u)),
1183 1190 : prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
1184 1190 : prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
1185 1190 : prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
1186 1190 : prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
1187 1190 : prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
1188 1190 : prefix, yes_no(unit_may_gc(u)),
1189 1190 : prefix, yes_no(unit_need_daemon_reload(u)),
1190 1190 : prefix, yes_no(u->transient),
1191 1190 : prefix, yes_no(u->perpetual),
1192 : prefix, collect_mode_to_string(u->collect_mode),
1193 : prefix, strna(unit_slice_name(u)),
1194 1190 : prefix, strna(u->cgroup_path),
1195 1190 : prefix, yes_no(u->cgroup_realized));
1196 :
1197 1190 : if (u->cgroup_realized_mask != 0) {
1198 0 : _cleanup_free_ char *s = NULL;
1199 0 : (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1200 0 : fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1201 : }
1202 :
1203 1190 : if (u->cgroup_enabled_mask != 0) {
1204 0 : _cleanup_free_ char *s = NULL;
1205 0 : (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1206 0 : fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1207 : }
1208 :
1209 1190 : m = unit_get_own_mask(u);
1210 1190 : if (m != 0) {
1211 182 : _cleanup_free_ char *s = NULL;
1212 182 : (void) cg_mask_to_string(m, &s);
1213 182 : fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1214 : }
1215 :
1216 1190 : m = unit_get_members_mask(u);
1217 1190 : if (m != 0) {
1218 6 : _cleanup_free_ char *s = NULL;
1219 6 : (void) cg_mask_to_string(m, &s);
1220 6 : fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1221 : }
1222 :
1223 1190 : m = unit_get_delegate_mask(u);
1224 1190 : if (m != 0) {
1225 0 : _cleanup_free_ char *s = NULL;
1226 0 : (void) cg_mask_to_string(m, &s);
1227 0 : fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1228 : }
1229 :
1230 1190 : if (!sd_id128_is_null(u->invocation_id))
1231 12672 : fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1232 792 : prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1233 :
1234 1232 : STRV_FOREACH(j, u->documentation)
1235 42 : fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1236 :
1237 1190 : following = unit_following(u);
1238 1190 : if (following)
1239 468 : fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1240 :
1241 1190 : r = unit_following_set(u, &following_set);
1242 1190 : if (r >= 0) {
1243 : Unit *other;
1244 :
1245 3470 : SET_FOREACH(other, following_set, i)
1246 2280 : fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1247 : }
1248 :
1249 1190 : if (u->fragment_path)
1250 74 : fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1251 :
1252 1190 : if (u->source_path)
1253 84 : fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1254 :
1255 1190 : STRV_FOREACH(j, u->dropin_paths)
1256 0 : fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1257 :
1258 1190 : if (u->failure_action != EMERGENCY_ACTION_NONE)
1259 0 : fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1260 1190 : if (u->failure_action_exit_status >= 0)
1261 0 : fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1262 1190 : if (u->success_action != EMERGENCY_ACTION_NONE)
1263 0 : fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1264 1190 : if (u->success_action_exit_status >= 0)
1265 0 : fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1266 :
1267 1190 : if (u->job_timeout != USEC_INFINITY)
1268 0 : fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1269 :
1270 1190 : if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1271 0 : fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1272 :
1273 1190 : if (u->job_timeout_reboot_arg)
1274 0 : fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1275 :
1276 1190 : condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1277 1190 : condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1278 :
1279 1190 : if (dual_timestamp_is_set(&u->condition_timestamp))
1280 0 : fprintf(f,
1281 : "%s\tCondition Timestamp: %s\n"
1282 : "%s\tCondition Result: %s\n",
1283 0 : prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
1284 0 : prefix, yes_no(u->condition_result));
1285 :
1286 1190 : if (dual_timestamp_is_set(&u->assert_timestamp))
1287 0 : fprintf(f,
1288 : "%s\tAssert Timestamp: %s\n"
1289 : "%s\tAssert Result: %s\n",
1290 0 : prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
1291 0 : prefix, yes_no(u->assert_result));
1292 :
1293 27370 : for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1294 : UnitDependencyInfo di;
1295 : Unit *other;
1296 :
1297 29176 : HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1298 2996 : bool space = false;
1299 :
1300 2996 : fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1301 :
1302 2996 : print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1303 2996 : print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1304 :
1305 2996 : fputs(")\n", f);
1306 : }
1307 : }
1308 :
1309 1190 : if (!hashmap_isempty(u->requires_mounts_for)) {
1310 : UnitDependencyInfo di;
1311 : const char *path;
1312 :
1313 258 : HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1314 132 : bool space = false;
1315 :
1316 132 : fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1317 :
1318 132 : print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1319 132 : print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1320 :
1321 132 : fputs(")\n", f);
1322 : }
1323 : }
1324 :
1325 1190 : if (u->load_state == UNIT_LOADED) {
1326 :
1327 1004 : fprintf(f,
1328 : "%s\tStopWhenUnneeded: %s\n"
1329 : "%s\tRefuseManualStart: %s\n"
1330 : "%s\tRefuseManualStop: %s\n"
1331 : "%s\tDefaultDependencies: %s\n"
1332 : "%s\tOnFailureJobMode: %s\n"
1333 : "%s\tIgnoreOnIsolate: %s\n",
1334 1004 : prefix, yes_no(u->stop_when_unneeded),
1335 1004 : prefix, yes_no(u->refuse_manual_start),
1336 1004 : prefix, yes_no(u->refuse_manual_stop),
1337 1004 : prefix, yes_no(u->default_dependencies),
1338 : prefix, job_mode_to_string(u->on_failure_job_mode),
1339 1004 : prefix, yes_no(u->ignore_on_isolate));
1340 :
1341 1004 : if (UNIT_VTABLE(u)->dump)
1342 1004 : UNIT_VTABLE(u)->dump(u, f, prefix2);
1343 :
1344 186 : } else if (u->load_state == UNIT_MERGED)
1345 0 : fprintf(f,
1346 : "%s\tMerged into: %s\n",
1347 0 : prefix, u->merged_into->id);
1348 186 : else if (u->load_state == UNIT_ERROR)
1349 0 : fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));
1350 :
1351 1190 : for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1352 0 : fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1353 :
1354 1190 : if (u->job)
1355 11 : job_dump(u->job, f, prefix2);
1356 :
1357 1190 : if (u->nop_job)
1358 0 : job_dump(u->nop_job, f, prefix2);
1359 1190 : }
1360 :
1361 : /* Common implementation for multiple backends */
1362 399 : int unit_load_fragment_and_dropin(Unit *u) {
1363 : int r;
1364 :
1365 399 : assert(u);
1366 :
1367 : /* Load a .{service,socket,...} file */
1368 399 : r = unit_load_fragment(u);
1369 399 : if (r < 0)
1370 0 : return r;
1371 :
1372 399 : if (u->load_state == UNIT_STUB)
1373 351 : return -ENOENT;
1374 :
1375 : /* Load drop-in directory data. If u is an alias, we might be reloading the
1376 : * target unit needlessly. But we cannot be sure which drops-ins have already
1377 : * been loaded and which not, at least without doing complicated book-keeping,
1378 : * so let's always reread all drop-ins. */
1379 48 : return unit_load_dropin(unit_follow_merge(u));
1380 : }
1381 :
1382 : /* Common implementation for multiple backends */
1383 1712 : int unit_load_fragment_and_dropin_optional(Unit *u) {
1384 : int r;
1385 :
1386 1712 : assert(u);
1387 :
1388 : /* Same as unit_load_fragment_and_dropin(), but whether
1389 : * something can be loaded or not doesn't matter. */
1390 :
1391 : /* Load a .service/.socket/.slice/… file */
1392 1712 : r = unit_load_fragment(u);
1393 1712 : if (r < 0)
1394 0 : return r;
1395 :
1396 1712 : if (u->load_state == UNIT_STUB)
1397 1694 : u->load_state = UNIT_LOADED;
1398 :
1399 : /* Load drop-in directory data */
1400 1712 : return unit_load_dropin(unit_follow_merge(u));
1401 : }
1402 :
1403 1793 : void unit_add_to_target_deps_queue(Unit *u) {
1404 1793 : Manager *m = u->manager;
1405 :
1406 1793 : assert(u);
1407 :
1408 1793 : if (u->in_target_deps_queue)
1409 0 : return;
1410 :
1411 1793 : LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1412 1793 : u->in_target_deps_queue = true;
1413 : }
1414 :
1415 405 : int unit_add_default_target_dependency(Unit *u, Unit *target) {
1416 405 : assert(u);
1417 405 : assert(target);
1418 :
1419 405 : if (target->type != UNIT_TARGET)
1420 291 : return 0;
1421 :
1422 : /* Only add the dependency if both units are loaded, so that
1423 : * that loop check below is reliable */
1424 114 : if (u->load_state != UNIT_LOADED ||
1425 44 : target->load_state != UNIT_LOADED)
1426 70 : return 0;
1427 :
1428 : /* If either side wants no automatic dependencies, then let's
1429 : * skip this */
1430 44 : if (!u->default_dependencies ||
1431 40 : !target->default_dependencies)
1432 4 : return 0;
1433 :
1434 : /* Don't create loops */
1435 40 : if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1436 0 : return 0;
1437 :
1438 40 : return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1439 : }
1440 :
1441 1793 : static int unit_add_slice_dependencies(Unit *u) {
1442 : UnitDependencyMask mask;
1443 1793 : assert(u);
1444 :
1445 1793 : if (!UNIT_HAS_CGROUP_CONTEXT(u))
1446 1500 : return 0;
1447 :
1448 : /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1449 : name), while all other units are ordered based on configuration (as in their case Slice= configures the
1450 : relationship). */
1451 293 : mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1452 :
1453 293 : if (UNIT_ISSET(u->slice))
1454 282 : return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1455 :
1456 11 : if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1457 11 : return 0;
1458 :
1459 0 : return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1460 : }
1461 :
1462 1793 : static int unit_add_mount_dependencies(Unit *u) {
1463 : UnitDependencyInfo di;
1464 : const char *path;
1465 : Iterator i;
1466 : int r;
1467 :
1468 1793 : assert(u);
1469 :
1470 2042 : HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1471 249 : char prefix[strlen(path) + 1];
1472 :
1473 1051 : PATH_FOREACH_PREFIX_MORE(prefix, path) {
1474 802 : _cleanup_free_ char *p = NULL;
1475 : Unit *m;
1476 :
1477 802 : r = unit_name_from_path(prefix, ".mount", &p);
1478 802 : if (r < 0)
1479 0 : return r;
1480 :
1481 802 : m = manager_get_unit(u->manager, p);
1482 802 : if (!m) {
1483 : /* Make sure to load the mount unit if
1484 : * it exists. If so the dependencies
1485 : * on this unit will be added later
1486 : * during the loading of the mount
1487 : * unit. */
1488 282 : (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1489 282 : continue;
1490 : }
1491 520 : if (m == u)
1492 0 : continue;
1493 :
1494 520 : if (m->load_state != UNIT_LOADED)
1495 484 : continue;
1496 :
1497 36 : r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1498 36 : if (r < 0)
1499 0 : return r;
1500 :
1501 36 : if (m->fragment_path) {
1502 0 : r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1503 0 : if (r < 0)
1504 0 : return r;
1505 : }
1506 : }
1507 : }
1508 :
1509 1793 : return 0;
1510 : }
1511 :
1512 1793 : static int unit_add_startup_units(Unit *u) {
1513 : CGroupContext *c;
1514 : int r;
1515 :
1516 1793 : c = unit_get_cgroup_context(u);
1517 1793 : if (!c)
1518 1500 : return 0;
1519 :
1520 293 : if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1521 293 : c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1522 293 : c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1523 293 : return 0;
1524 :
1525 0 : r = set_ensure_allocated(&u->manager->startup_units, NULL);
1526 0 : if (r < 0)
1527 0 : return r;
1528 :
1529 0 : return set_put(u->manager->startup_units, u);
1530 : }
1531 :
1532 2155 : int unit_load(Unit *u) {
1533 : int r;
1534 :
1535 2155 : assert(u);
1536 :
1537 2155 : if (u->in_load_queue) {
1538 2155 : LIST_REMOVE(load_queue, u->manager->load_queue, u);
1539 2155 : u->in_load_queue = false;
1540 : }
1541 :
1542 2155 : if (u->type == _UNIT_TYPE_INVALID)
1543 0 : return -EINVAL;
1544 :
1545 2155 : if (u->load_state != UNIT_STUB)
1546 0 : return 0;
1547 :
1548 2155 : if (u->transient_file) {
1549 : /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1550 : * is complete, hence let's synchronize the unit file we just wrote to disk. */
1551 :
1552 0 : r = fflush_and_check(u->transient_file);
1553 0 : if (r < 0)
1554 0 : goto fail;
1555 :
1556 0 : u->transient_file = safe_fclose(u->transient_file);
1557 0 : u->fragment_mtime = now(CLOCK_REALTIME);
1558 : }
1559 :
1560 2155 : if (UNIT_VTABLE(u)->load) {
1561 2155 : r = UNIT_VTABLE(u)->load(u);
1562 2155 : if (r < 0)
1563 362 : goto fail;
1564 : }
1565 :
1566 1793 : if (u->load_state == UNIT_STUB) {
1567 0 : r = -ENOENT;
1568 0 : goto fail;
1569 : }
1570 :
1571 1793 : if (u->load_state == UNIT_LOADED) {
1572 1793 : unit_add_to_target_deps_queue(u);
1573 :
1574 1793 : r = unit_add_slice_dependencies(u);
1575 1793 : if (r < 0)
1576 0 : goto fail;
1577 :
1578 1793 : r = unit_add_mount_dependencies(u);
1579 1793 : if (r < 0)
1580 0 : goto fail;
1581 :
1582 1793 : r = unit_add_startup_units(u);
1583 1793 : if (r < 0)
1584 0 : goto fail;
1585 :
1586 1793 : if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1587 0 : log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1588 0 : r = -ENOEXEC;
1589 0 : goto fail;
1590 : }
1591 :
1592 1793 : if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1593 0 : log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1594 :
1595 : /* We finished loading, let's ensure our parents recalculate the members mask */
1596 1793 : unit_invalidate_cgroup_members_masks(u);
1597 : }
1598 :
1599 1793 : assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1600 :
1601 1793 : unit_add_to_dbus_queue(unit_follow_merge(u));
1602 1793 : unit_add_to_gc_queue(u);
1603 :
1604 1793 : return 0;
1605 :
1606 362 : fail:
1607 : /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1608 : * return ENOEXEC to ensure units are placed in this state after loading */
1609 :
1610 362 : u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
1611 : r == -ENOEXEC ? UNIT_BAD_SETTING :
1612 : UNIT_ERROR;
1613 362 : u->load_error = r;
1614 :
1615 362 : unit_add_to_dbus_queue(u);
1616 362 : unit_add_to_gc_queue(u);
1617 :
1618 362 : return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1619 : }
1620 :
1621 : _printf_(7, 8)
1622 0 : static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
1623 0 : Unit *u = userdata;
1624 : va_list ap;
1625 : int r;
1626 :
1627 0 : va_start(ap, format);
1628 0 : if (u)
1629 0 : r = log_object_internalv(level, error, file, line, func,
1630 0 : u->manager->unit_log_field,
1631 0 : u->id,
1632 0 : u->manager->invocation_log_field,
1633 0 : u->invocation_id_string,
1634 : format, ap);
1635 : else
1636 0 : r = log_internalv(level, error, file, line, func, format, ap);
1637 0 : va_end(ap);
1638 :
1639 0 : return r;
1640 : }
1641 :
1642 31 : static bool unit_test_condition(Unit *u) {
1643 31 : assert(u);
1644 :
1645 31 : dual_timestamp_get(&u->condition_timestamp);
1646 31 : u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1647 :
1648 31 : unit_add_to_dbus_queue(u);
1649 :
1650 31 : return u->condition_result;
1651 : }
1652 :
1653 31 : static bool unit_test_assert(Unit *u) {
1654 31 : assert(u);
1655 :
1656 31 : dual_timestamp_get(&u->assert_timestamp);
1657 31 : u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1658 :
1659 31 : unit_add_to_dbus_queue(u);
1660 :
1661 31 : return u->assert_result;
1662 : }
1663 :
1664 26 : void unit_status_printf(Unit *u, const char *status, const char *unit_status_msg_format) {
1665 : const char *d;
1666 :
1667 26 : d = unit_status_string(u);
1668 26 : if (log_get_show_color())
1669 0 : d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1670 :
1671 : DISABLE_WARNING_FORMAT_NONLITERAL;
1672 26 : manager_status_printf(u->manager, STATUS_TYPE_NORMAL, status, unit_status_msg_format, d);
1673 : REENABLE_WARNING;
1674 26 : }
1675 :
1676 13 : int unit_test_start_limit(Unit *u) {
1677 : const char *reason;
1678 :
1679 13 : assert(u);
1680 :
1681 13 : if (ratelimit_below(&u->start_limit)) {
1682 13 : u->start_limit_hit = false;
1683 13 : return 0;
1684 : }
1685 :
1686 0 : log_unit_warning(u, "Start request repeated too quickly.");
1687 0 : u->start_limit_hit = true;
1688 :
1689 0 : reason = strjoina("unit ", u->id, " failed");
1690 :
1691 0 : emergency_action(u->manager, u->start_limit_action,
1692 : EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1693 0 : u->reboot_arg, -1, reason);
1694 :
1695 0 : return -ECANCELED;
1696 : }
1697 :
1698 0 : bool unit_shall_confirm_spawn(Unit *u) {
1699 0 : assert(u);
1700 :
1701 0 : if (manager_is_confirm_spawn_disabled(u->manager))
1702 0 : return false;
1703 :
1704 : /* For some reasons units remaining in the same process group
1705 : * as PID 1 fail to acquire the console even if it's not used
1706 : * by any process. So skip the confirmation question for them. */
1707 0 : return !unit_get_exec_context(u)->same_pgrp;
1708 : }
1709 :
1710 31 : static bool unit_verify_deps(Unit *u) {
1711 : Unit *other;
1712 : Iterator j;
1713 : void *v;
1714 :
1715 31 : assert(u);
1716 :
1717 : /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1718 : * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1719 : * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1720 : * conjunction with After= as for them any such check would make things entirely racy. */
1721 :
1722 31 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1723 :
1724 0 : if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1725 0 : continue;
1726 :
1727 0 : if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1728 0 : log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1729 0 : return false;
1730 : }
1731 : }
1732 :
1733 31 : return true;
1734 : }
1735 :
1736 : /* Errors that aren't really errors:
1737 : * -EALREADY: Unit is already started.
1738 : * -ECOMM: Condition failed
1739 : * -EAGAIN: An operation is already in progress. Retry later.
1740 : *
1741 : * Errors that are real errors:
1742 : * -EBADR: This unit type does not support starting.
1743 : * -ECANCELED: Start limit hit, too many requests for now
1744 : * -EPROTO: Assert failed
1745 : * -EINVAL: Unit not loaded
1746 : * -EOPNOTSUPP: Unit type not supported
1747 : * -ENOLINK: The necessary dependencies are not fulfilled.
1748 : * -ESTALE: This unit has been started before and can't be started a second time
1749 : * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1750 : */
1751 31 : int unit_start(Unit *u) {
1752 : UnitActiveState state;
1753 : Unit *following;
1754 : int r;
1755 :
1756 31 : assert(u);
1757 :
1758 : /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1759 : * is not startable by the user. This is relied on to detect when we need to wait for units and when
1760 : * waiting is finished. */
1761 31 : state = unit_active_state(u);
1762 31 : if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1763 0 : return -EALREADY;
1764 31 : if (state == UNIT_MAINTENANCE)
1765 0 : return -EAGAIN;
1766 :
1767 : /* Units that aren't loaded cannot be started */
1768 31 : if (u->load_state != UNIT_LOADED)
1769 0 : return -EINVAL;
1770 :
1771 : /* Refuse starting scope units more than once */
1772 31 : if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1773 0 : return -ESTALE;
1774 :
1775 : /* If the conditions failed, don't do anything at all. If we already are activating this call might
1776 : * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1777 : * recheck the condition in that case. */
1778 31 : if (state != UNIT_ACTIVATING &&
1779 31 : !unit_test_condition(u)) {
1780 :
1781 : /* Let's also check the start limit here. Normally, the start limit is only checked by the
1782 : * .start() method of the unit type after it did some additional checks verifying everything
1783 : * is in order (so that those other checks can propagate errors properly). However, if a
1784 : * condition check doesn't hold we don't get that far but we should still ensure we are not
1785 : * called in a tight loop without a rate limit check enforced, hence do the check here. Note
1786 : * that ECOMM is generally not a reason for a job to fail, unlike most other errors here,
1787 : * hence the chance is big that any triggering unit for us will trigger us again. Note this
1788 : * condition check is a bit different from the condition check inside the per-unit .start()
1789 : * function, as this one will not change the unit's state in any way (and we shouldn't here,
1790 : * after all the condition failed). */
1791 :
1792 0 : r = unit_test_start_limit(u);
1793 0 : if (r < 0)
1794 0 : return r;
1795 :
1796 0 : return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
1797 : }
1798 :
1799 : /* If the asserts failed, fail the entire job */
1800 31 : if (state != UNIT_ACTIVATING &&
1801 31 : !unit_test_assert(u))
1802 0 : return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1803 :
1804 : /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1805 : * condition checks, so that we rather return condition check errors (which are usually not
1806 : * considered a true failure) than "not supported" errors (which are considered a failure).
1807 : */
1808 31 : if (!unit_type_supported(u->type))
1809 0 : return -EOPNOTSUPP;
1810 :
1811 : /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1812 : * should have taken care of this already, but let's check this here again. After all, our
1813 : * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1814 31 : if (!unit_verify_deps(u))
1815 0 : return -ENOLINK;
1816 :
1817 : /* Forward to the main object, if we aren't it. */
1818 31 : following = unit_following(u);
1819 31 : if (following) {
1820 0 : log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1821 0 : return unit_start(following);
1822 : }
1823 :
1824 : /* If it is stopped, but we cannot start it, then fail */
1825 31 : if (!UNIT_VTABLE(u)->start)
1826 0 : return -EBADR;
1827 :
1828 : /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1829 : * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1830 : * waits for a holdoff timer to elapse before it will start again. */
1831 :
1832 31 : unit_add_to_dbus_queue(u);
1833 :
1834 31 : return UNIT_VTABLE(u)->start(u);
1835 : }
1836 :
1837 1 : bool unit_can_start(Unit *u) {
1838 1 : assert(u);
1839 :
1840 1 : if (u->load_state != UNIT_LOADED)
1841 0 : return false;
1842 :
1843 1 : if (!unit_type_supported(u->type))
1844 0 : return false;
1845 :
1846 : /* Scope units may be started only once */
1847 1 : if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1848 0 : return false;
1849 :
1850 1 : return !!UNIT_VTABLE(u)->start;
1851 : }
1852 :
1853 0 : bool unit_can_isolate(Unit *u) {
1854 0 : assert(u);
1855 :
1856 0 : return unit_can_start(u) &&
1857 0 : u->allow_isolate;
1858 : }
1859 :
1860 : /* Errors:
1861 : * -EBADR: This unit type does not support stopping.
1862 : * -EALREADY: Unit is already stopped.
1863 : * -EAGAIN: An operation is already in progress. Retry later.
1864 : */
1865 7 : int unit_stop(Unit *u) {
1866 : UnitActiveState state;
1867 : Unit *following;
1868 :
1869 7 : assert(u);
1870 :
1871 7 : state = unit_active_state(u);
1872 7 : if (UNIT_IS_INACTIVE_OR_FAILED(state))
1873 0 : return -EALREADY;
1874 :
1875 7 : following = unit_following(u);
1876 7 : if (following) {
1877 0 : log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1878 0 : return unit_stop(following);
1879 : }
1880 :
1881 7 : if (!UNIT_VTABLE(u)->stop)
1882 0 : return -EBADR;
1883 :
1884 7 : unit_add_to_dbus_queue(u);
1885 :
1886 7 : return UNIT_VTABLE(u)->stop(u);
1887 : }
1888 :
1889 1 : bool unit_can_stop(Unit *u) {
1890 1 : assert(u);
1891 :
1892 1 : if (!unit_type_supported(u->type))
1893 0 : return false;
1894 :
1895 1 : if (u->perpetual)
1896 0 : return false;
1897 :
1898 1 : return !!UNIT_VTABLE(u)->stop;
1899 : }
1900 :
1901 : /* Errors:
1902 : * -EBADR: This unit type does not support reloading.
1903 : * -ENOEXEC: Unit is not started.
1904 : * -EAGAIN: An operation is already in progress. Retry later.
1905 : */
1906 0 : int unit_reload(Unit *u) {
1907 : UnitActiveState state;
1908 : Unit *following;
1909 :
1910 0 : assert(u);
1911 :
1912 0 : if (u->load_state != UNIT_LOADED)
1913 0 : return -EINVAL;
1914 :
1915 0 : if (!unit_can_reload(u))
1916 0 : return -EBADR;
1917 :
1918 0 : state = unit_active_state(u);
1919 0 : if (state == UNIT_RELOADING)
1920 0 : return -EAGAIN;
1921 :
1922 0 : if (state != UNIT_ACTIVE) {
1923 0 : log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1924 0 : return -ENOEXEC;
1925 : }
1926 :
1927 0 : following = unit_following(u);
1928 0 : if (following) {
1929 0 : log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1930 0 : return unit_reload(following);
1931 : }
1932 :
1933 0 : unit_add_to_dbus_queue(u);
1934 :
1935 0 : if (!UNIT_VTABLE(u)->reload) {
1936 : /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1937 0 : unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1938 0 : return 0;
1939 : }
1940 :
1941 0 : return UNIT_VTABLE(u)->reload(u);
1942 : }
1943 :
1944 0 : bool unit_can_reload(Unit *u) {
1945 0 : assert(u);
1946 :
1947 0 : if (UNIT_VTABLE(u)->can_reload)
1948 0 : return UNIT_VTABLE(u)->can_reload(u);
1949 :
1950 0 : if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1951 0 : return true;
1952 :
1953 0 : return UNIT_VTABLE(u)->reload;
1954 : }
1955 :
1956 0 : bool unit_is_unneeded(Unit *u) {
1957 : static const UnitDependency deps[] = {
1958 : UNIT_REQUIRED_BY,
1959 : UNIT_REQUISITE_OF,
1960 : UNIT_WANTED_BY,
1961 : UNIT_BOUND_BY,
1962 : };
1963 : size_t j;
1964 :
1965 0 : assert(u);
1966 :
1967 0 : if (!u->stop_when_unneeded)
1968 0 : return false;
1969 :
1970 : /* Don't clean up while the unit is transitioning or is even inactive. */
1971 0 : if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1972 0 : return false;
1973 0 : if (u->job)
1974 0 : return false;
1975 :
1976 0 : for (j = 0; j < ELEMENTSOF(deps); j++) {
1977 : Unit *other;
1978 : Iterator i;
1979 : void *v;
1980 :
1981 : /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1982 : * restart, then don't clean this one up. */
1983 :
1984 0 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1985 0 : if (other->job)
1986 0 : return false;
1987 :
1988 0 : if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1989 0 : return false;
1990 :
1991 0 : if (unit_will_restart(other))
1992 0 : return false;
1993 : }
1994 : }
1995 :
1996 0 : return true;
1997 : }
1998 :
1999 7 : static void check_unneeded_dependencies(Unit *u) {
2000 :
2001 : static const UnitDependency deps[] = {
2002 : UNIT_REQUIRES,
2003 : UNIT_REQUISITE,
2004 : UNIT_WANTS,
2005 : UNIT_BINDS_TO,
2006 : };
2007 : size_t j;
2008 :
2009 7 : assert(u);
2010 :
2011 : /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2012 :
2013 35 : for (j = 0; j < ELEMENTSOF(deps); j++) {
2014 : Unit *other;
2015 : Iterator i;
2016 : void *v;
2017 :
2018 28 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2019 0 : unit_submit_to_stop_when_unneeded_queue(other);
2020 : }
2021 7 : }
2022 :
2023 1749 : static void unit_check_binds_to(Unit *u) {
2024 1749 : _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2025 1749 : bool stop = false;
2026 : Unit *other;
2027 : Iterator i;
2028 : void *v;
2029 : int r;
2030 :
2031 1749 : assert(u);
2032 :
2033 1749 : if (u->job)
2034 6 : return;
2035 :
2036 1743 : if (unit_active_state(u) != UNIT_ACTIVE)
2037 18 : return;
2038 :
2039 1725 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2040 0 : if (other->job)
2041 0 : continue;
2042 :
2043 0 : if (!other->coldplugged)
2044 : /* We might yet create a job for the other unit… */
2045 0 : continue;
2046 :
2047 0 : if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2048 0 : continue;
2049 :
2050 0 : stop = true;
2051 0 : break;
2052 : }
2053 :
2054 1725 : if (!stop)
2055 1725 : return;
2056 :
2057 : /* If stopping a unit fails continuously we might enter a stop
2058 : * loop here, hence stop acting on the service being
2059 : * unnecessary after a while. */
2060 0 : if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2061 0 : log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2062 0 : return;
2063 : }
2064 :
2065 0 : assert(other);
2066 0 : log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2067 :
2068 : /* A unit we need to run is gone. Sniff. Let's stop this. */
2069 0 : r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2070 0 : if (r < 0)
2071 0 : log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2072 : }
2073 :
2074 1712 : static void retroactively_start_dependencies(Unit *u) {
2075 : Iterator i;
2076 : Unit *other;
2077 : void *v;
2078 :
2079 1712 : assert(u);
2080 1712 : assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2081 :
2082 1954 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2083 242 : if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2084 0 : !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2085 0 : manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2086 :
2087 1712 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2088 0 : if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2089 0 : !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2090 0 : manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2091 :
2092 1745 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2093 33 : if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2094 0 : !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2095 0 : manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2096 :
2097 1719 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2098 7 : if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2099 0 : manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2100 :
2101 1712 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2102 0 : if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2103 0 : manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2104 1712 : }
2105 :
2106 7 : static void retroactively_stop_dependencies(Unit *u) {
2107 : Unit *other;
2108 : Iterator i;
2109 : void *v;
2110 :
2111 7 : assert(u);
2112 7 : assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2113 :
2114 : /* Pull down units which are bound to us recursively if enabled */
2115 7 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2116 0 : if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2117 0 : manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2118 7 : }
2119 :
2120 0 : void unit_start_on_failure(Unit *u) {
2121 : Unit *other;
2122 : Iterator i;
2123 : void *v;
2124 : int r;
2125 :
2126 0 : assert(u);
2127 :
2128 0 : if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2129 0 : return;
2130 :
2131 0 : log_unit_info(u, "Triggering OnFailure= dependencies.");
2132 :
2133 0 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2134 0 : _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2135 :
2136 0 : r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2137 0 : if (r < 0)
2138 0 : log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2139 : }
2140 : }
2141 :
2142 1793 : void unit_trigger_notify(Unit *u) {
2143 : Unit *other;
2144 : Iterator i;
2145 : void *v;
2146 :
2147 1793 : assert(u);
2148 :
2149 1799 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2150 6 : if (UNIT_VTABLE(other)->trigger_notify)
2151 6 : UNIT_VTABLE(other)->trigger_notify(other, u);
2152 1793 : }
2153 :
2154 0 : static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2155 0 : if (condition_notice && log_level > LOG_NOTICE)
2156 0 : return LOG_NOTICE;
2157 0 : if (condition_info && log_level > LOG_INFO)
2158 0 : return LOG_INFO;
2159 0 : return log_level;
2160 : }
2161 :
2162 7 : static int unit_log_resources(Unit *u) {
2163 : struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2164 7 : bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2165 7 : _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2166 7 : int log_level = LOG_DEBUG; /* May be raised if resources consumed over a treshold */
2167 7 : size_t n_message_parts = 0, n_iovec = 0;
2168 : char* message_parts[1 + 2 + 2 + 1], *t;
2169 7 : nsec_t nsec = NSEC_INFINITY;
2170 : CGroupIPAccountingMetric m;
2171 : size_t i;
2172 : int r;
2173 7 : const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2174 : [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2175 : [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2176 : [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2177 : [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2178 : };
2179 7 : const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2180 : [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2181 : [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2182 : [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2183 : [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2184 : };
2185 :
2186 7 : assert(u);
2187 :
2188 : /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2189 : * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2190 : * information and the complete data in structured fields. */
2191 :
2192 7 : (void) unit_get_cpu_usage(u, &nsec);
2193 7 : if (nsec != NSEC_INFINITY) {
2194 0 : char buf[FORMAT_TIMESPAN_MAX] = "";
2195 :
2196 : /* Format the CPU time for inclusion in the structured log message */
2197 0 : if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2198 0 : r = log_oom();
2199 0 : goto finish;
2200 : }
2201 0 : iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2202 :
2203 : /* Format the CPU time for inclusion in the human language message string */
2204 0 : format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2205 0 : t = strjoin("consumed ", buf, " CPU time");
2206 0 : if (!t) {
2207 0 : r = log_oom();
2208 0 : goto finish;
2209 : }
2210 :
2211 0 : message_parts[n_message_parts++] = t;
2212 :
2213 0 : log_level = raise_level(log_level,
2214 : nsec > NOTICEWORTHY_CPU_NSEC,
2215 : nsec > MENTIONWORTHY_CPU_NSEC);
2216 : }
2217 :
2218 35 : for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2219 28 : char buf[FORMAT_BYTES_MAX] = "";
2220 28 : uint64_t value = UINT64_MAX;
2221 :
2222 28 : assert(io_fields[k]);
2223 :
2224 28 : (void) unit_get_io_accounting(u, k, k > 0, &value);
2225 28 : if (value == UINT64_MAX)
2226 28 : continue;
2227 :
2228 0 : have_io_accounting = true;
2229 0 : if (value > 0)
2230 0 : any_io = true;
2231 :
2232 : /* Format IO accounting data for inclusion in the structured log message */
2233 0 : if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2234 0 : r = log_oom();
2235 0 : goto finish;
2236 : }
2237 0 : iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2238 :
2239 : /* Format the IO accounting data for inclusion in the human language message string, but only
2240 : * for the bytes counters (and not for the operations counters) */
2241 0 : if (k == CGROUP_IO_READ_BYTES) {
2242 0 : assert(!rr);
2243 0 : rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2244 0 : if (!rr) {
2245 0 : r = log_oom();
2246 0 : goto finish;
2247 : }
2248 0 : } else if (k == CGROUP_IO_WRITE_BYTES) {
2249 0 : assert(!wr);
2250 0 : wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2251 0 : if (!wr) {
2252 0 : r = log_oom();
2253 0 : goto finish;
2254 : }
2255 : }
2256 :
2257 0 : if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2258 0 : log_level = raise_level(log_level,
2259 : value > MENTIONWORTHY_IO_BYTES,
2260 : value > NOTICEWORTHY_IO_BYTES);
2261 : }
2262 :
2263 7 : if (have_io_accounting) {
2264 0 : if (any_io) {
2265 0 : if (rr)
2266 0 : message_parts[n_message_parts++] = TAKE_PTR(rr);
2267 0 : if (wr)
2268 0 : message_parts[n_message_parts++] = TAKE_PTR(wr);
2269 :
2270 : } else {
2271 : char *k;
2272 :
2273 0 : k = strdup("no IO");
2274 0 : if (!k) {
2275 0 : r = log_oom();
2276 0 : goto finish;
2277 : }
2278 :
2279 0 : message_parts[n_message_parts++] = k;
2280 : }
2281 : }
2282 :
2283 35 : for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2284 28 : char buf[FORMAT_BYTES_MAX] = "";
2285 28 : uint64_t value = UINT64_MAX;
2286 :
2287 28 : assert(ip_fields[m]);
2288 :
2289 28 : (void) unit_get_ip_accounting(u, m, &value);
2290 28 : if (value == UINT64_MAX)
2291 28 : continue;
2292 :
2293 0 : have_ip_accounting = true;
2294 0 : if (value > 0)
2295 0 : any_traffic = true;
2296 :
2297 : /* Format IP accounting data for inclusion in the structured log message */
2298 0 : if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2299 0 : r = log_oom();
2300 0 : goto finish;
2301 : }
2302 0 : iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2303 :
2304 : /* Format the IP accounting data for inclusion in the human language message string, but only for the
2305 : * bytes counters (and not for the packets counters) */
2306 0 : if (m == CGROUP_IP_INGRESS_BYTES) {
2307 0 : assert(!igress);
2308 0 : igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2309 0 : if (!igress) {
2310 0 : r = log_oom();
2311 0 : goto finish;
2312 : }
2313 0 : } else if (m == CGROUP_IP_EGRESS_BYTES) {
2314 0 : assert(!egress);
2315 0 : egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2316 0 : if (!egress) {
2317 0 : r = log_oom();
2318 0 : goto finish;
2319 : }
2320 : }
2321 :
2322 0 : if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2323 0 : log_level = raise_level(log_level,
2324 : value > MENTIONWORTHY_IP_BYTES,
2325 : value > NOTICEWORTHY_IP_BYTES);
2326 : }
2327 :
2328 7 : if (have_ip_accounting) {
2329 0 : if (any_traffic) {
2330 0 : if (igress)
2331 0 : message_parts[n_message_parts++] = TAKE_PTR(igress);
2332 0 : if (egress)
2333 0 : message_parts[n_message_parts++] = TAKE_PTR(egress);
2334 :
2335 : } else {
2336 : char *k;
2337 :
2338 0 : k = strdup("no IP traffic");
2339 0 : if (!k) {
2340 0 : r = log_oom();
2341 0 : goto finish;
2342 : }
2343 :
2344 0 : message_parts[n_message_parts++] = k;
2345 : }
2346 : }
2347 :
2348 : /* Is there any accounting data available at all? */
2349 7 : if (n_iovec == 0) {
2350 7 : r = 0;
2351 7 : goto finish;
2352 : }
2353 :
2354 0 : if (n_message_parts == 0)
2355 0 : t = strjoina("MESSAGE=", u->id, ": Completed.");
2356 : else {
2357 0 : _cleanup_free_ char *joined;
2358 :
2359 0 : message_parts[n_message_parts] = NULL;
2360 :
2361 0 : joined = strv_join(message_parts, ", ");
2362 0 : if (!joined) {
2363 0 : r = log_oom();
2364 0 : goto finish;
2365 : }
2366 :
2367 0 : joined[0] = ascii_toupper(joined[0]);
2368 0 : t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2369 : }
2370 :
2371 : /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2372 : * and hence don't increase n_iovec for them */
2373 0 : iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2374 0 : iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2375 :
2376 0 : t = strjoina(u->manager->unit_log_field, u->id);
2377 0 : iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2378 :
2379 0 : t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2380 0 : iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2381 :
2382 0 : log_struct_iovec(log_level, iovec, n_iovec + 4);
2383 0 : r = 0;
2384 :
2385 7 : finish:
2386 7 : for (i = 0; i < n_message_parts; i++)
2387 0 : free(message_parts[i]);
2388 :
2389 7 : for (i = 0; i < n_iovec; i++)
2390 0 : free(iovec[i].iov_base);
2391 :
2392 7 : return r;
2393 :
2394 : }
2395 :
2396 1749 : static void unit_update_on_console(Unit *u) {
2397 : bool b;
2398 :
2399 1749 : assert(u);
2400 :
2401 1749 : b = unit_needs_console(u);
2402 1749 : if (u->on_console == b)
2403 1749 : return;
2404 :
2405 0 : u->on_console = b;
2406 0 : if (b)
2407 0 : manager_ref_console(u->manager);
2408 : else
2409 0 : manager_unref_console(u->manager);
2410 : }
2411 :
2412 1719 : static void unit_emit_audit_start(Unit *u) {
2413 1719 : assert(u);
2414 :
2415 1719 : if (u->type != UNIT_SERVICE)
2416 1719 : return;
2417 :
2418 : /* Write audit record if we have just finished starting up */
2419 0 : manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2420 0 : u->in_audit = true;
2421 : }
2422 :
2423 7 : static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2424 7 : assert(u);
2425 :
2426 7 : if (u->type != UNIT_SERVICE)
2427 7 : return;
2428 :
2429 0 : if (u->in_audit) {
2430 : /* Write audit record if we have just finished shutting down */
2431 0 : manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2432 0 : u->in_audit = false;
2433 : } else {
2434 : /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2435 0 : manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2436 :
2437 0 : if (state == UNIT_INACTIVE)
2438 0 : manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2439 : }
2440 : }
2441 :
2442 24 : static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2443 24 : bool unexpected = false;
2444 : JobResult result;
2445 :
2446 24 : assert(j);
2447 :
2448 24 : if (j->state == JOB_WAITING)
2449 :
2450 : /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2451 : * due to EAGAIN. */
2452 0 : job_add_to_run_queue(j);
2453 :
2454 : /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2455 : * hence needs to invalidate jobs. */
2456 :
2457 24 : switch (j->type) {
2458 :
2459 24 : case JOB_START:
2460 : case JOB_VERIFY_ACTIVE:
2461 :
2462 24 : if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2463 18 : job_finish_and_invalidate(j, JOB_DONE, true, false);
2464 6 : else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2465 0 : unexpected = true;
2466 :
2467 0 : if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2468 0 : if (ns == UNIT_FAILED)
2469 0 : result = JOB_FAILED;
2470 0 : else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2471 0 : result = JOB_SKIPPED;
2472 : else
2473 0 : result = JOB_DONE;
2474 :
2475 0 : job_finish_and_invalidate(j, result, true, false);
2476 : }
2477 : }
2478 :
2479 24 : break;
2480 :
2481 0 : case JOB_RELOAD:
2482 : case JOB_RELOAD_OR_START:
2483 : case JOB_TRY_RELOAD:
2484 :
2485 0 : if (j->state == JOB_RUNNING) {
2486 0 : if (ns == UNIT_ACTIVE)
2487 0 : job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2488 0 : else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2489 0 : unexpected = true;
2490 :
2491 0 : if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2492 0 : job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2493 : }
2494 : }
2495 :
2496 0 : break;
2497 :
2498 0 : case JOB_STOP:
2499 : case JOB_RESTART:
2500 : case JOB_TRY_RESTART:
2501 :
2502 0 : if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2503 0 : job_finish_and_invalidate(j, JOB_DONE, true, false);
2504 0 : else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2505 0 : unexpected = true;
2506 0 : job_finish_and_invalidate(j, JOB_FAILED, true, false);
2507 : }
2508 :
2509 0 : break;
2510 :
2511 0 : default:
2512 0 : assert_not_reached("Job type unknown");
2513 : }
2514 :
2515 24 : return unexpected;
2516 : }
2517 :
2518 1749 : void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2519 : const char *reason;
2520 : Manager *m;
2521 :
2522 1749 : assert(u);
2523 1749 : assert(os < _UNIT_ACTIVE_STATE_MAX);
2524 1749 : assert(ns < _UNIT_ACTIVE_STATE_MAX);
2525 :
2526 : /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2527 : * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2528 : * remounted this function will be called too! */
2529 :
2530 1749 : m = u->manager;
2531 :
2532 : /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2533 : * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2534 1749 : unit_add_to_dbus_queue(u);
2535 :
2536 : /* Update timestamps for state changes */
2537 1749 : if (!MANAGER_IS_RELOADING(m)) {
2538 1749 : dual_timestamp_get(&u->state_change_timestamp);
2539 :
2540 1749 : if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2541 1736 : u->inactive_exit_timestamp = u->state_change_timestamp;
2542 13 : else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2543 7 : u->inactive_enter_timestamp = u->state_change_timestamp;
2544 :
2545 1749 : if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2546 1719 : u->active_enter_timestamp = u->state_change_timestamp;
2547 30 : else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2548 7 : u->active_exit_timestamp = u->state_change_timestamp;
2549 : }
2550 :
2551 : /* Keep track of failed units */
2552 1749 : (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2553 :
2554 : /* Make sure the cgroup and state files are always removed when we become inactive */
2555 1749 : if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2556 7 : unit_prune_cgroup(u);
2557 7 : unit_unlink_state_files(u);
2558 : }
2559 :
2560 1749 : unit_update_on_console(u);
2561 :
2562 1749 : if (!MANAGER_IS_RELOADING(m)) {
2563 : bool unexpected;
2564 :
2565 : /* Let's propagate state changes to the job */
2566 1749 : if (u->job)
2567 24 : unexpected = unit_process_job(u->job, ns, flags);
2568 : else
2569 1725 : unexpected = true;
2570 :
2571 : /* If this state change happened without being requested by a job, then let's retroactively start or
2572 : * stop dependencies. We skip that step when deserializing, since we don't want to create any
2573 : * additional jobs just because something is already activated. */
2574 :
2575 1749 : if (unexpected) {
2576 1725 : if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2577 1712 : retroactively_start_dependencies(u);
2578 13 : else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2579 7 : retroactively_stop_dependencies(u);
2580 : }
2581 :
2582 : /* stop unneeded units regardless if going down was expected or not */
2583 1749 : if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2584 7 : check_unneeded_dependencies(u);
2585 :
2586 1749 : if (ns != os && ns == UNIT_FAILED) {
2587 0 : log_unit_debug(u, "Unit entered failed state.");
2588 :
2589 0 : if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2590 0 : unit_start_on_failure(u);
2591 : }
2592 :
2593 1749 : if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2594 : /* This unit just finished starting up */
2595 :
2596 1719 : unit_emit_audit_start(u);
2597 1719 : manager_send_unit_plymouth(m, u);
2598 : }
2599 :
2600 1749 : if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2601 : /* This unit just stopped/failed. */
2602 :
2603 7 : unit_emit_audit_stop(u, ns);
2604 7 : unit_log_resources(u);
2605 : }
2606 : }
2607 :
2608 1749 : manager_recheck_journal(m);
2609 1749 : manager_recheck_dbus(m);
2610 :
2611 1749 : unit_trigger_notify(u);
2612 :
2613 1749 : if (!MANAGER_IS_RELOADING(m)) {
2614 : /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2615 1749 : unit_submit_to_stop_when_unneeded_queue(u);
2616 :
2617 : /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2618 : * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2619 : * without ever entering started.) */
2620 1749 : unit_check_binds_to(u);
2621 :
2622 1749 : if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2623 0 : reason = strjoina("unit ", u->id, " failed");
2624 0 : emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2625 1749 : } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2626 49 : reason = strjoina("unit ", u->id, " succeeded");
2627 7 : emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2628 : }
2629 : }
2630 :
2631 1749 : unit_add_to_gc_queue(u);
2632 1749 : }
2633 :
2634 6 : int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2635 : int r;
2636 :
2637 6 : assert(u);
2638 6 : assert(pid_is_valid(pid));
2639 :
2640 : /* Watch a specific PID */
2641 :
2642 : /* Caller might be sure that this PID belongs to this unit only. Let's take this
2643 : * opportunity to remove any stalled references to this PID as they can be created
2644 : * easily (when watching a process which is not our direct child). */
2645 6 : if (exclusive)
2646 6 : manager_unwatch_pid(u->manager, pid);
2647 :
2648 6 : r = set_ensure_allocated(&u->pids, NULL);
2649 6 : if (r < 0)
2650 0 : return r;
2651 :
2652 6 : r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2653 6 : if (r < 0)
2654 0 : return r;
2655 :
2656 : /* First try, let's add the unit keyed by "pid". */
2657 6 : r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2658 6 : if (r == -EEXIST) {
2659 : Unit **array;
2660 0 : bool found = false;
2661 0 : size_t n = 0;
2662 :
2663 : /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2664 : * to an array of Units rather than just a Unit), lists us already. */
2665 :
2666 0 : array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2667 0 : if (array)
2668 0 : for (; array[n]; n++)
2669 0 : if (array[n] == u)
2670 0 : found = true;
2671 :
2672 0 : if (found) /* Found it already? if so, do nothing */
2673 0 : r = 0;
2674 : else {
2675 : Unit **new_array;
2676 :
2677 : /* Allocate a new array */
2678 0 : new_array = new(Unit*, n + 2);
2679 0 : if (!new_array)
2680 0 : return -ENOMEM;
2681 :
2682 0 : memcpy_safe(new_array, array, sizeof(Unit*) * n);
2683 0 : new_array[n] = u;
2684 0 : new_array[n+1] = NULL;
2685 :
2686 : /* Add or replace the old array */
2687 0 : r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2688 0 : if (r < 0) {
2689 0 : free(new_array);
2690 0 : return r;
2691 : }
2692 :
2693 0 : free(array);
2694 : }
2695 6 : } else if (r < 0)
2696 0 : return r;
2697 :
2698 6 : r = set_put(u->pids, PID_TO_PTR(pid));
2699 6 : if (r < 0)
2700 0 : return r;
2701 :
2702 6 : return 0;
2703 : }
2704 :
2705 6 : void unit_unwatch_pid(Unit *u, pid_t pid) {
2706 : Unit **array;
2707 :
2708 6 : assert(u);
2709 6 : assert(pid_is_valid(pid));
2710 :
2711 : /* First let's drop the unit in case it's keyed as "pid". */
2712 6 : (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2713 :
2714 : /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2715 6 : array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2716 6 : if (array) {
2717 0 : size_t n, m = 0;
2718 :
2719 : /* Let's iterate through the array, dropping our own entry */
2720 0 : for (n = 0; array[n]; n++)
2721 0 : if (array[n] != u)
2722 0 : array[m++] = array[n];
2723 0 : array[m] = NULL;
2724 :
2725 0 : if (m == 0) {
2726 : /* The array is now empty, remove the entire entry */
2727 0 : assert(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2728 0 : free(array);
2729 : }
2730 : }
2731 :
2732 6 : (void) set_remove(u->pids, PID_TO_PTR(pid));
2733 6 : }
2734 :
2735 2160 : void unit_unwatch_all_pids(Unit *u) {
2736 2160 : assert(u);
2737 :
2738 2160 : while (!set_isempty(u->pids))
2739 0 : unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2740 :
2741 2160 : u->pids = set_free(u->pids);
2742 2160 : }
2743 :
2744 0 : static void unit_tidy_watch_pids(Unit *u) {
2745 : pid_t except1, except2;
2746 : Iterator i;
2747 : void *e;
2748 :
2749 0 : assert(u);
2750 :
2751 : /* Cleans dead PIDs from our list */
2752 :
2753 0 : except1 = unit_main_pid(u);
2754 0 : except2 = unit_control_pid(u);
2755 :
2756 0 : SET_FOREACH(e, u->pids, i) {
2757 0 : pid_t pid = PTR_TO_PID(e);
2758 :
2759 0 : if (pid == except1 || pid == except2)
2760 0 : continue;
2761 :
2762 0 : if (!pid_is_unwaited(pid))
2763 0 : unit_unwatch_pid(u, pid);
2764 : }
2765 0 : }
2766 :
2767 0 : static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2768 0 : Unit *u = userdata;
2769 :
2770 0 : assert(s);
2771 0 : assert(u);
2772 :
2773 0 : unit_tidy_watch_pids(u);
2774 0 : unit_watch_all_pids(u);
2775 :
2776 : /* If the PID set is empty now, then let's finish this off. */
2777 0 : unit_synthesize_cgroup_empty_event(u);
2778 :
2779 0 : return 0;
2780 : }
2781 :
2782 11 : int unit_enqueue_rewatch_pids(Unit *u) {
2783 : int r;
2784 :
2785 11 : assert(u);
2786 :
2787 11 : if (!u->cgroup_path)
2788 11 : return -ENOENT;
2789 :
2790 0 : r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2791 0 : if (r < 0)
2792 0 : return r;
2793 0 : if (r > 0) /* On unified we can use proper notifications */
2794 0 : return 0;
2795 :
2796 : /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2797 : * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2798 : * involves issuing kill(pid, 0) on all processes we watch. */
2799 :
2800 0 : if (!u->rewatch_pids_event_source) {
2801 0 : _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2802 :
2803 0 : r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2804 0 : if (r < 0)
2805 0 : return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2806 :
2807 0 : r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2808 0 : if (r < 0)
2809 0 : return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: m");
2810 :
2811 0 : (void) sd_event_source_set_description(s, "tidy-watch-pids");
2812 :
2813 0 : u->rewatch_pids_event_source = TAKE_PTR(s);
2814 : }
2815 :
2816 0 : r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2817 0 : if (r < 0)
2818 0 : return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2819 :
2820 0 : return 0;
2821 : }
2822 :
2823 2160 : void unit_dequeue_rewatch_pids(Unit *u) {
2824 : int r;
2825 2160 : assert(u);
2826 :
2827 2160 : if (!u->rewatch_pids_event_source)
2828 2160 : return;
2829 :
2830 0 : r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2831 0 : if (r < 0)
2832 0 : log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2833 :
2834 0 : u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2835 : }
2836 :
2837 311 : bool unit_job_is_applicable(Unit *u, JobType j) {
2838 311 : assert(u);
2839 311 : assert(j >= 0 && j < _JOB_TYPE_MAX);
2840 :
2841 311 : switch (j) {
2842 :
2843 175 : case JOB_VERIFY_ACTIVE:
2844 : case JOB_START:
2845 : case JOB_NOP:
2846 : /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2847 : * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2848 : * jobs for it. */
2849 175 : return true;
2850 :
2851 135 : case JOB_STOP:
2852 : /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2853 : * external events), hence it makes no sense to permit enqueing such a request either. */
2854 135 : return !u->perpetual;
2855 :
2856 1 : case JOB_RESTART:
2857 : case JOB_TRY_RESTART:
2858 1 : return unit_can_stop(u) && unit_can_start(u);
2859 :
2860 0 : case JOB_RELOAD:
2861 : case JOB_TRY_RELOAD:
2862 0 : return unit_can_reload(u);
2863 :
2864 0 : case JOB_RELOAD_OR_START:
2865 0 : return unit_can_reload(u) && unit_can_start(u);
2866 :
2867 0 : default:
2868 0 : assert_not_reached("Invalid job type");
2869 : }
2870 : }
2871 :
2872 0 : static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2873 0 : assert(u);
2874 :
2875 : /* Only warn about some unit types */
2876 0 : if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2877 0 : return;
2878 :
2879 0 : if (streq_ptr(u->id, other))
2880 0 : log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2881 : else
2882 0 : log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2883 : }
2884 :
2885 5760 : static int unit_add_dependency_hashmap(
2886 : Hashmap **h,
2887 : Unit *other,
2888 : UnitDependencyMask origin_mask,
2889 : UnitDependencyMask destination_mask) {
2890 :
2891 : UnitDependencyInfo info;
2892 : int r;
2893 :
2894 5760 : assert(h);
2895 5760 : assert(other);
2896 5760 : assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2897 5760 : assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2898 5760 : assert(origin_mask > 0 || destination_mask > 0);
2899 :
2900 5760 : r = hashmap_ensure_allocated(h, NULL);
2901 5760 : if (r < 0)
2902 0 : return r;
2903 :
2904 : assert_cc(sizeof(void*) == sizeof(info));
2905 :
2906 5760 : info.data = hashmap_get(*h, other);
2907 5760 : if (info.data) {
2908 : /* Entry already exists. Add in our mask. */
2909 :
2910 1286 : if (FLAGS_SET(origin_mask, info.origin_mask) &&
2911 1205 : FLAGS_SET(destination_mask, info.destination_mask))
2912 1124 : return 0; /* NOP */
2913 :
2914 162 : info.origin_mask |= origin_mask;
2915 162 : info.destination_mask |= destination_mask;
2916 :
2917 162 : r = hashmap_update(*h, other, info.data);
2918 : } else {
2919 4474 : info = (UnitDependencyInfo) {
2920 : .origin_mask = origin_mask,
2921 : .destination_mask = destination_mask,
2922 : };
2923 :
2924 4474 : r = hashmap_put(*h, other, info.data);
2925 : }
2926 4636 : if (r < 0)
2927 0 : return r;
2928 :
2929 4636 : return 1;
2930 : }
2931 :
2932 1440 : int unit_add_dependency(
2933 : Unit *u,
2934 : UnitDependency d,
2935 : Unit *other,
2936 : bool add_reference,
2937 : UnitDependencyMask mask) {
2938 :
2939 : static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2940 : [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2941 : [UNIT_WANTS] = UNIT_WANTED_BY,
2942 : [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2943 : [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2944 : [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2945 : [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2946 : [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2947 : [UNIT_WANTED_BY] = UNIT_WANTS,
2948 : [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2949 : [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2950 : [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2951 : [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2952 : [UNIT_BEFORE] = UNIT_AFTER,
2953 : [UNIT_AFTER] = UNIT_BEFORE,
2954 : [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2955 : [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2956 : [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2957 : [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2958 : [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2959 : [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2960 : [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2961 : [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2962 : };
2963 1440 : Unit *original_u = u, *original_other = other;
2964 : int r;
2965 :
2966 1440 : assert(u);
2967 1440 : assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2968 1440 : assert(other);
2969 :
2970 1440 : u = unit_follow_merge(u);
2971 1440 : other = unit_follow_merge(other);
2972 :
2973 : /* We won't allow dependencies on ourselves. We will not
2974 : * consider them an error however. */
2975 1440 : if (u == other) {
2976 0 : maybe_warn_about_dependency(original_u, original_other->id, d);
2977 0 : return 0;
2978 : }
2979 :
2980 1440 : if ((d == UNIT_BEFORE && other->type == UNIT_DEVICE) ||
2981 780 : (d == UNIT_AFTER && u->type == UNIT_DEVICE)) {
2982 0 : log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2983 0 : return 0;
2984 : }
2985 :
2986 1440 : r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2987 1440 : if (r < 0)
2988 0 : return r;
2989 :
2990 1440 : if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2991 1440 : r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2992 1440 : if (r < 0)
2993 0 : return r;
2994 : }
2995 :
2996 1440 : if (add_reference) {
2997 1440 : r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
2998 1440 : if (r < 0)
2999 0 : return r;
3000 :
3001 1440 : r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
3002 1440 : if (r < 0)
3003 0 : return r;
3004 : }
3005 :
3006 1440 : unit_add_to_dbus_queue(u);
3007 1440 : return 0;
3008 : }
3009 :
3010 402 : int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3011 : int r;
3012 :
3013 402 : assert(u);
3014 :
3015 402 : r = unit_add_dependency(u, d, other, add_reference, mask);
3016 402 : if (r < 0)
3017 0 : return r;
3018 :
3019 402 : return unit_add_dependency(u, e, other, add_reference, mask);
3020 : }
3021 :
3022 386 : static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3023 : int r;
3024 :
3025 386 : assert(u);
3026 386 : assert(name);
3027 386 : assert(buf);
3028 386 : assert(ret);
3029 :
3030 386 : if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3031 386 : *buf = NULL;
3032 386 : *ret = name;
3033 386 : return 0;
3034 : }
3035 :
3036 0 : if (u->instance)
3037 0 : r = unit_name_replace_instance(name, u->instance, buf);
3038 : else {
3039 0 : _cleanup_free_ char *i = NULL;
3040 :
3041 0 : r = unit_name_to_prefix(u->id, &i);
3042 0 : if (r < 0)
3043 0 : return r;
3044 :
3045 0 : r = unit_name_replace_instance(name, i, buf);
3046 : }
3047 0 : if (r < 0)
3048 0 : return r;
3049 :
3050 0 : *ret = *buf;
3051 0 : return 0;
3052 : }
3053 :
3054 305 : int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3055 305 : _cleanup_free_ char *buf = NULL;
3056 : Unit *other;
3057 : int r;
3058 :
3059 305 : assert(u);
3060 305 : assert(name);
3061 :
3062 305 : r = resolve_template(u, name, &buf, &name);
3063 305 : if (r < 0)
3064 0 : return r;
3065 :
3066 305 : r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3067 305 : if (r < 0)
3068 0 : return r;
3069 :
3070 305 : return unit_add_dependency(u, d, other, add_reference, mask);
3071 : }
3072 :
3073 81 : int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3074 81 : _cleanup_free_ char *buf = NULL;
3075 : Unit *other;
3076 : int r;
3077 :
3078 81 : assert(u);
3079 81 : assert(name);
3080 :
3081 81 : r = resolve_template(u, name, &buf, &name);
3082 81 : if (r < 0)
3083 0 : return r;
3084 :
3085 81 : r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3086 81 : if (r < 0)
3087 0 : return r;
3088 :
3089 81 : return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3090 : }
3091 :
3092 5 : int set_unit_path(const char *p) {
3093 : /* This is mostly for debug purposes */
3094 5 : if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3095 0 : return -errno;
3096 :
3097 5 : return 0;
3098 : }
3099 :
3100 0 : char *unit_dbus_path(Unit *u) {
3101 0 : assert(u);
3102 :
3103 0 : if (!u->id)
3104 0 : return NULL;
3105 :
3106 0 : return unit_dbus_path_from_name(u->id);
3107 : }
3108 :
3109 0 : char *unit_dbus_path_invocation_id(Unit *u) {
3110 0 : assert(u);
3111 :
3112 0 : if (sd_id128_is_null(u->invocation_id))
3113 0 : return NULL;
3114 :
3115 0 : return unit_dbus_path_from_name(u->invocation_id_string);
3116 : }
3117 :
3118 275 : int unit_set_slice(Unit *u, Unit *slice) {
3119 275 : assert(u);
3120 275 : assert(slice);
3121 :
3122 : /* Sets the unit slice if it has not been set before. Is extra
3123 : * careful, to only allow this for units that actually have a
3124 : * cgroup context. Also, we don't allow to set this for slices
3125 : * (since the parent slice is derived from the name). Make
3126 : * sure the unit we set is actually a slice. */
3127 :
3128 275 : if (!UNIT_HAS_CGROUP_CONTEXT(u))
3129 0 : return -EOPNOTSUPP;
3130 :
3131 275 : if (u->type == UNIT_SLICE)
3132 0 : return -EINVAL;
3133 :
3134 275 : if (unit_active_state(u) != UNIT_INACTIVE)
3135 0 : return -EBUSY;
3136 :
3137 275 : if (slice->type != UNIT_SLICE)
3138 0 : return -EINVAL;
3139 :
3140 275 : if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3141 11 : !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3142 0 : return -EPERM;
3143 :
3144 275 : if (UNIT_DEREF(u->slice) == slice)
3145 0 : return 0;
3146 :
3147 : /* Disallow slice changes if @u is already bound to cgroups */
3148 275 : if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3149 0 : return -EBUSY;
3150 :
3151 275 : unit_ref_set(&u->slice, u, slice);
3152 275 : return 1;
3153 : }
3154 :
3155 286 : int unit_set_default_slice(Unit *u) {
3156 : const char *slice_name;
3157 : Unit *slice;
3158 : int r;
3159 :
3160 286 : assert(u);
3161 :
3162 286 : if (UNIT_ISSET(u->slice))
3163 21 : return 0;
3164 :
3165 265 : if (u->instance) {
3166 0 : _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3167 :
3168 : /* Implicitly place all instantiated units in their
3169 : * own per-template slice */
3170 :
3171 0 : r = unit_name_to_prefix(u->id, &prefix);
3172 0 : if (r < 0)
3173 0 : return r;
3174 :
3175 : /* The prefix is already escaped, but it might include
3176 : * "-" which has a special meaning for slice units,
3177 : * hence escape it here extra. */
3178 0 : escaped = unit_name_escape(prefix);
3179 0 : if (!escaped)
3180 0 : return -ENOMEM;
3181 :
3182 0 : if (MANAGER_IS_SYSTEM(u->manager))
3183 0 : slice_name = strjoina("system-", escaped, ".slice");
3184 : else
3185 0 : slice_name = strjoina(escaped, ".slice");
3186 : } else
3187 265 : slice_name =
3188 0 : MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3189 : ? SPECIAL_SYSTEM_SLICE
3190 265 : : SPECIAL_ROOT_SLICE;
3191 :
3192 265 : r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3193 265 : if (r < 0)
3194 0 : return r;
3195 :
3196 265 : return unit_set_slice(u, slice);
3197 : }
3198 :
3199 1190 : const char *unit_slice_name(Unit *u) {
3200 1190 : assert(u);
3201 :
3202 1190 : if (!UNIT_ISSET(u->slice))
3203 1014 : return NULL;
3204 :
3205 176 : return UNIT_DEREF(u->slice)->id;
3206 : }
3207 :
3208 6 : int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3209 6 : _cleanup_free_ char *t = NULL;
3210 : int r;
3211 :
3212 6 : assert(u);
3213 6 : assert(type);
3214 6 : assert(_found);
3215 :
3216 6 : r = unit_name_change_suffix(u->id, type, &t);
3217 6 : if (r < 0)
3218 0 : return r;
3219 6 : if (unit_has_name(u, t))
3220 0 : return -EINVAL;
3221 :
3222 6 : r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3223 6 : assert(r < 0 || *_found != u);
3224 6 : return r;
3225 : }
3226 :
3227 0 : static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3228 : const char *name, *old_owner, *new_owner;
3229 0 : Unit *u = userdata;
3230 : int r;
3231 :
3232 0 : assert(message);
3233 0 : assert(u);
3234 :
3235 0 : r = sd_bus_message_read(message, "sss", &name, &old_owner, &new_owner);
3236 0 : if (r < 0) {
3237 0 : bus_log_parse_error(r);
3238 0 : return 0;
3239 : }
3240 :
3241 0 : old_owner = empty_to_null(old_owner);
3242 0 : new_owner = empty_to_null(new_owner);
3243 :
3244 0 : if (UNIT_VTABLE(u)->bus_name_owner_change)
3245 0 : UNIT_VTABLE(u)->bus_name_owner_change(u, old_owner, new_owner);
3246 :
3247 0 : return 0;
3248 : }
3249 :
3250 0 : static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3251 : const sd_bus_error *e;
3252 : const char *new_owner;
3253 0 : Unit *u = userdata;
3254 : int r;
3255 :
3256 0 : assert(message);
3257 0 : assert(u);
3258 :
3259 0 : u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3260 :
3261 0 : if (sd_bus_error_is_set(error)) {
3262 0 : log_error("Failed to get name owner from bus: %s", error->message);
3263 0 : return 0;
3264 : }
3265 :
3266 0 : e = sd_bus_message_get_error(message);
3267 0 : if (sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3268 0 : return 0;
3269 :
3270 0 : if (e) {
3271 0 : log_error("Unexpected error response from GetNameOwner: %s", e->message);
3272 0 : return 0;
3273 : }
3274 :
3275 0 : r = sd_bus_message_read(message, "s", &new_owner);
3276 0 : if (r < 0) {
3277 0 : bus_log_parse_error(r);
3278 0 : return 0;
3279 : }
3280 :
3281 0 : new_owner = empty_to_null(new_owner);
3282 :
3283 0 : if (UNIT_VTABLE(u)->bus_name_owner_change)
3284 0 : UNIT_VTABLE(u)->bus_name_owner_change(u, NULL, new_owner);
3285 :
3286 0 : return 0;
3287 : }
3288 :
3289 0 : int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3290 : const char *match;
3291 :
3292 0 : assert(u);
3293 0 : assert(bus);
3294 0 : assert(name);
3295 :
3296 0 : if (u->match_bus_slot)
3297 0 : return -EBUSY;
3298 :
3299 0 : match = strjoina("type='signal',"
3300 : "sender='org.freedesktop.DBus',"
3301 : "path='/org/freedesktop/DBus',"
3302 : "interface='org.freedesktop.DBus',"
3303 : "member='NameOwnerChanged',"
3304 : "arg0='", name, "'");
3305 :
3306 0 : int r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3307 0 : if (r < 0)
3308 0 : return r;
3309 :
3310 0 : return sd_bus_call_method_async(bus,
3311 : &u->get_name_owner_slot,
3312 : "org.freedesktop.DBus",
3313 : "/org/freedesktop/DBus",
3314 : "org.freedesktop.DBus",
3315 : "GetNameOwner",
3316 : get_name_owner_handler,
3317 : u,
3318 : "s", name);
3319 : }
3320 :
3321 0 : int unit_watch_bus_name(Unit *u, const char *name) {
3322 : int r;
3323 :
3324 0 : assert(u);
3325 0 : assert(name);
3326 :
3327 : /* Watch a specific name on the bus. We only support one unit
3328 : * watching each name for now. */
3329 :
3330 0 : if (u->manager->api_bus) {
3331 : /* If the bus is already available, install the match directly.
3332 : * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3333 0 : r = unit_install_bus_match(u, u->manager->api_bus, name);
3334 0 : if (r < 0)
3335 0 : return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3336 : }
3337 :
3338 0 : r = hashmap_put(u->manager->watch_bus, name, u);
3339 0 : if (r < 0) {
3340 0 : u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3341 0 : return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3342 : }
3343 :
3344 0 : return 0;
3345 : }
3346 :
3347 0 : void unit_unwatch_bus_name(Unit *u, const char *name) {
3348 0 : assert(u);
3349 0 : assert(name);
3350 :
3351 0 : (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3352 0 : u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3353 0 : u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3354 0 : }
3355 :
3356 0 : bool unit_can_serialize(Unit *u) {
3357 0 : assert(u);
3358 :
3359 0 : return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3360 : }
3361 :
3362 0 : static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3363 0 : _cleanup_free_ char *s = NULL;
3364 : int r;
3365 :
3366 0 : assert(f);
3367 0 : assert(key);
3368 :
3369 0 : if (mask == 0)
3370 0 : return 0;
3371 :
3372 0 : r = cg_mask_to_string(mask, &s);
3373 0 : if (r < 0)
3374 0 : return log_error_errno(r, "Failed to format cgroup mask: %m");
3375 :
3376 0 : return serialize_item(f, key, s);
3377 : }
3378 :
3379 : static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3380 : [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3381 : [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3382 : [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3383 : [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3384 : };
3385 :
3386 : static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3387 : [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3388 : [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3389 : [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3390 : [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3391 : };
3392 :
3393 : static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3394 : [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3395 : [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3396 : [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3397 : [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3398 : };
3399 :
3400 0 : int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3401 : CGroupIPAccountingMetric m;
3402 : int r;
3403 :
3404 0 : assert(u);
3405 0 : assert(f);
3406 0 : assert(fds);
3407 :
3408 0 : if (unit_can_serialize(u)) {
3409 0 : r = UNIT_VTABLE(u)->serialize(u, f, fds);
3410 0 : if (r < 0)
3411 0 : return r;
3412 : }
3413 :
3414 0 : (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3415 :
3416 0 : (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3417 0 : (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3418 0 : (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3419 0 : (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3420 :
3421 0 : (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3422 0 : (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3423 :
3424 0 : if (dual_timestamp_is_set(&u->condition_timestamp))
3425 0 : (void) serialize_bool(f, "condition-result", u->condition_result);
3426 :
3427 0 : if (dual_timestamp_is_set(&u->assert_timestamp))
3428 0 : (void) serialize_bool(f, "assert-result", u->assert_result);
3429 :
3430 0 : (void) serialize_bool(f, "transient", u->transient);
3431 0 : (void) serialize_bool(f, "in-audit", u->in_audit);
3432 :
3433 0 : (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3434 0 : (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3435 0 : (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3436 0 : (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_rate_limit_interval);
3437 0 : (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_rate_limit_burst);
3438 :
3439 0 : (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3440 0 : if (u->cpu_usage_last != NSEC_INFINITY)
3441 0 : (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3442 :
3443 0 : if (u->oom_kill_last > 0)
3444 0 : (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3445 :
3446 0 : for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3447 0 : (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3448 :
3449 0 : if (u->io_accounting_last[im] != UINT64_MAX)
3450 0 : (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3451 : }
3452 :
3453 0 : if (u->cgroup_path)
3454 0 : (void) serialize_item(f, "cgroup", u->cgroup_path);
3455 :
3456 0 : (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3457 0 : (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3458 0 : (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3459 0 : (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3460 :
3461 0 : if (uid_is_valid(u->ref_uid))
3462 0 : (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3463 0 : if (gid_is_valid(u->ref_gid))
3464 0 : (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3465 :
3466 0 : if (!sd_id128_is_null(u->invocation_id))
3467 0 : (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3468 :
3469 0 : bus_track_serialize(u->bus_track, f, "ref");
3470 :
3471 0 : for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3472 : uint64_t v;
3473 :
3474 0 : r = unit_get_ip_accounting(u, m, &v);
3475 0 : if (r >= 0)
3476 0 : (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3477 : }
3478 :
3479 0 : if (serialize_jobs) {
3480 0 : if (u->job) {
3481 0 : fputs("job\n", f);
3482 0 : job_serialize(u->job, f);
3483 : }
3484 :
3485 0 : if (u->nop_job) {
3486 0 : fputs("job\n", f);
3487 0 : job_serialize(u->nop_job, f);
3488 : }
3489 : }
3490 :
3491 : /* End marker */
3492 0 : fputc('\n', f);
3493 0 : return 0;
3494 : }
3495 :
3496 0 : static int unit_deserialize_job(Unit *u, FILE *f) {
3497 0 : _cleanup_(job_freep) Job *j = NULL;
3498 : int r;
3499 :
3500 0 : assert(u);
3501 0 : assert(f);
3502 :
3503 0 : j = job_new_raw(u);
3504 0 : if (!j)
3505 0 : return log_oom();
3506 :
3507 0 : r = job_deserialize(j, f);
3508 0 : if (r < 0)
3509 0 : return r;
3510 :
3511 0 : r = job_install_deserialized(j);
3512 0 : if (r < 0)
3513 0 : return r;
3514 :
3515 0 : TAKE_PTR(j);
3516 0 : return 0;
3517 : }
3518 :
3519 0 : int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3520 : int r;
3521 :
3522 0 : assert(u);
3523 0 : assert(f);
3524 0 : assert(fds);
3525 :
3526 0 : for (;;) {
3527 0 : _cleanup_free_ char *line = NULL;
3528 : char *l, *v;
3529 : ssize_t m;
3530 : size_t k;
3531 :
3532 0 : r = read_line(f, LONG_LINE_MAX, &line);
3533 0 : if (r < 0)
3534 0 : return log_error_errno(r, "Failed to read serialization line: %m");
3535 0 : if (r == 0) /* eof */
3536 0 : break;
3537 :
3538 0 : l = strstrip(line);
3539 0 : if (isempty(l)) /* End marker */
3540 0 : break;
3541 :
3542 0 : k = strcspn(l, "=");
3543 :
3544 0 : if (l[k] == '=') {
3545 0 : l[k] = 0;
3546 0 : v = l+k+1;
3547 : } else
3548 0 : v = l+k;
3549 :
3550 0 : if (streq(l, "job")) {
3551 0 : if (v[0] == '\0') {
3552 : /* New-style serialized job */
3553 0 : r = unit_deserialize_job(u, f);
3554 0 : if (r < 0)
3555 0 : return r;
3556 : } else /* Legacy for pre-44 */
3557 0 : log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3558 0 : continue;
3559 0 : } else if (streq(l, "state-change-timestamp")) {
3560 0 : (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3561 0 : continue;
3562 0 : } else if (streq(l, "inactive-exit-timestamp")) {
3563 0 : (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3564 0 : continue;
3565 0 : } else if (streq(l, "active-enter-timestamp")) {
3566 0 : (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3567 0 : continue;
3568 0 : } else if (streq(l, "active-exit-timestamp")) {
3569 0 : (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3570 0 : continue;
3571 0 : } else if (streq(l, "inactive-enter-timestamp")) {
3572 0 : (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3573 0 : continue;
3574 0 : } else if (streq(l, "condition-timestamp")) {
3575 0 : (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3576 0 : continue;
3577 0 : } else if (streq(l, "assert-timestamp")) {
3578 0 : (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3579 0 : continue;
3580 0 : } else if (streq(l, "condition-result")) {
3581 :
3582 0 : r = parse_boolean(v);
3583 0 : if (r < 0)
3584 0 : log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3585 : else
3586 0 : u->condition_result = r;
3587 :
3588 0 : continue;
3589 :
3590 0 : } else if (streq(l, "assert-result")) {
3591 :
3592 0 : r = parse_boolean(v);
3593 0 : if (r < 0)
3594 0 : log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3595 : else
3596 0 : u->assert_result = r;
3597 :
3598 0 : continue;
3599 :
3600 0 : } else if (streq(l, "transient")) {
3601 :
3602 0 : r = parse_boolean(v);
3603 0 : if (r < 0)
3604 0 : log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3605 : else
3606 0 : u->transient = r;
3607 :
3608 0 : continue;
3609 :
3610 0 : } else if (streq(l, "in-audit")) {
3611 :
3612 0 : r = parse_boolean(v);
3613 0 : if (r < 0)
3614 0 : log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3615 : else
3616 0 : u->in_audit = r;
3617 :
3618 0 : continue;
3619 :
3620 0 : } else if (streq(l, "exported-invocation-id")) {
3621 :
3622 0 : r = parse_boolean(v);
3623 0 : if (r < 0)
3624 0 : log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3625 : else
3626 0 : u->exported_invocation_id = r;
3627 :
3628 0 : continue;
3629 :
3630 0 : } else if (streq(l, "exported-log-level-max")) {
3631 :
3632 0 : r = parse_boolean(v);
3633 0 : if (r < 0)
3634 0 : log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3635 : else
3636 0 : u->exported_log_level_max = r;
3637 :
3638 0 : continue;
3639 :
3640 0 : } else if (streq(l, "exported-log-extra-fields")) {
3641 :
3642 0 : r = parse_boolean(v);
3643 0 : if (r < 0)
3644 0 : log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3645 : else
3646 0 : u->exported_log_extra_fields = r;
3647 :
3648 0 : continue;
3649 :
3650 0 : } else if (streq(l, "exported-log-rate-limit-interval")) {
3651 :
3652 0 : r = parse_boolean(v);
3653 0 : if (r < 0)
3654 0 : log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3655 : else
3656 0 : u->exported_log_rate_limit_interval = r;
3657 :
3658 0 : continue;
3659 :
3660 0 : } else if (streq(l, "exported-log-rate-limit-burst")) {
3661 :
3662 0 : r = parse_boolean(v);
3663 0 : if (r < 0)
3664 0 : log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3665 : else
3666 0 : u->exported_log_rate_limit_burst = r;
3667 :
3668 0 : continue;
3669 :
3670 0 : } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3671 :
3672 0 : r = safe_atou64(v, &u->cpu_usage_base);
3673 0 : if (r < 0)
3674 0 : log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3675 :
3676 0 : continue;
3677 :
3678 0 : } else if (streq(l, "cpu-usage-last")) {
3679 :
3680 0 : r = safe_atou64(v, &u->cpu_usage_last);
3681 0 : if (r < 0)
3682 0 : log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3683 :
3684 0 : continue;
3685 :
3686 0 : } else if (streq(l, "oom-kill-last")) {
3687 :
3688 0 : r = safe_atou64(v, &u->oom_kill_last);
3689 0 : if (r < 0)
3690 0 : log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3691 :
3692 0 : continue;
3693 :
3694 0 : } else if (streq(l, "cgroup")) {
3695 :
3696 0 : r = unit_set_cgroup_path(u, v);
3697 0 : if (r < 0)
3698 0 : log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3699 :
3700 0 : (void) unit_watch_cgroup(u);
3701 0 : (void) unit_watch_cgroup_memory(u);
3702 :
3703 0 : continue;
3704 0 : } else if (streq(l, "cgroup-realized")) {
3705 : int b;
3706 :
3707 0 : b = parse_boolean(v);
3708 0 : if (b < 0)
3709 0 : log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3710 : else
3711 0 : u->cgroup_realized = b;
3712 :
3713 0 : continue;
3714 :
3715 0 : } else if (streq(l, "cgroup-realized-mask")) {
3716 :
3717 0 : r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3718 0 : if (r < 0)
3719 0 : log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3720 0 : continue;
3721 :
3722 0 : } else if (streq(l, "cgroup-enabled-mask")) {
3723 :
3724 0 : r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3725 0 : if (r < 0)
3726 0 : log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3727 0 : continue;
3728 :
3729 0 : } else if (streq(l, "cgroup-invalidated-mask")) {
3730 :
3731 0 : r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3732 0 : if (r < 0)
3733 0 : log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3734 0 : continue;
3735 :
3736 0 : } else if (streq(l, "ref-uid")) {
3737 : uid_t uid;
3738 :
3739 0 : r = parse_uid(v, &uid);
3740 0 : if (r < 0)
3741 0 : log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3742 : else
3743 0 : unit_ref_uid_gid(u, uid, GID_INVALID);
3744 :
3745 0 : continue;
3746 :
3747 0 : } else if (streq(l, "ref-gid")) {
3748 : gid_t gid;
3749 :
3750 0 : r = parse_gid(v, &gid);
3751 0 : if (r < 0)
3752 0 : log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3753 : else
3754 0 : unit_ref_uid_gid(u, UID_INVALID, gid);
3755 :
3756 0 : continue;
3757 :
3758 0 : } else if (streq(l, "ref")) {
3759 :
3760 0 : r = strv_extend(&u->deserialized_refs, v);
3761 0 : if (r < 0)
3762 0 : return log_oom();
3763 :
3764 0 : continue;
3765 0 : } else if (streq(l, "invocation-id")) {
3766 : sd_id128_t id;
3767 :
3768 0 : r = sd_id128_from_string(v, &id);
3769 0 : if (r < 0)
3770 0 : log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3771 : else {
3772 0 : r = unit_set_invocation_id(u, id);
3773 0 : if (r < 0)
3774 0 : log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3775 : }
3776 :
3777 0 : continue;
3778 : }
3779 :
3780 : /* Check if this is an IP accounting metric serialization field */
3781 0 : m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3782 0 : if (m >= 0) {
3783 : uint64_t c;
3784 :
3785 0 : r = safe_atou64(v, &c);
3786 0 : if (r < 0)
3787 0 : log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3788 : else
3789 0 : u->ip_accounting_extra[m] = c;
3790 0 : continue;
3791 : }
3792 :
3793 0 : m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3794 0 : if (m >= 0) {
3795 : uint64_t c;
3796 :
3797 0 : r = safe_atou64(v, &c);
3798 0 : if (r < 0)
3799 0 : log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3800 : else
3801 0 : u->io_accounting_base[m] = c;
3802 0 : continue;
3803 : }
3804 :
3805 0 : m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3806 0 : if (m >= 0) {
3807 : uint64_t c;
3808 :
3809 0 : r = safe_atou64(v, &c);
3810 0 : if (r < 0)
3811 0 : log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3812 : else
3813 0 : u->io_accounting_last[m] = c;
3814 0 : continue;
3815 : }
3816 :
3817 0 : if (unit_can_serialize(u)) {
3818 0 : r = exec_runtime_deserialize_compat(u, l, v, fds);
3819 0 : if (r < 0) {
3820 0 : log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3821 0 : continue;
3822 : }
3823 :
3824 : /* Returns positive if key was handled by the call */
3825 0 : if (r > 0)
3826 0 : continue;
3827 :
3828 0 : r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3829 0 : if (r < 0)
3830 0 : log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3831 : }
3832 : }
3833 :
3834 : /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3835 : * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3836 : * before 228 where the base for timeouts was not persistent across reboots. */
3837 :
3838 0 : if (!dual_timestamp_is_set(&u->state_change_timestamp))
3839 0 : dual_timestamp_get(&u->state_change_timestamp);
3840 :
3841 : /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3842 : * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3843 0 : unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3844 0 : unit_invalidate_cgroup_bpf(u);
3845 :
3846 0 : return 0;
3847 : }
3848 :
3849 0 : int unit_deserialize_skip(FILE *f) {
3850 : int r;
3851 0 : assert(f);
3852 :
3853 : /* Skip serialized data for this unit. We don't know what it is. */
3854 :
3855 0 : for (;;) {
3856 0 : _cleanup_free_ char *line = NULL;
3857 : char *l;
3858 :
3859 0 : r = read_line(f, LONG_LINE_MAX, &line);
3860 0 : if (r < 0)
3861 0 : return log_error_errno(r, "Failed to read serialization line: %m");
3862 0 : if (r == 0)
3863 0 : return 0;
3864 :
3865 0 : l = strstrip(line);
3866 :
3867 : /* End marker */
3868 0 : if (isempty(l))
3869 0 : return 1;
3870 : }
3871 : }
3872 :
3873 33 : int unit_add_node_dependency(Unit *u, const char *what, bool wants, UnitDependency dep, UnitDependencyMask mask) {
3874 : Unit *device;
3875 33 : _cleanup_free_ char *e = NULL;
3876 : int r;
3877 :
3878 33 : assert(u);
3879 :
3880 : /* Adds in links to the device node that this unit is based on */
3881 33 : if (isempty(what))
3882 0 : return 0;
3883 :
3884 33 : if (!is_device_path(what))
3885 0 : return 0;
3886 :
3887 : /* When device units aren't supported (such as in a
3888 : * container), don't create dependencies on them. */
3889 33 : if (!unit_type_supported(UNIT_DEVICE))
3890 0 : return 0;
3891 :
3892 33 : r = unit_name_from_path(what, ".device", &e);
3893 33 : if (r < 0)
3894 0 : return r;
3895 :
3896 33 : r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3897 33 : if (r < 0)
3898 0 : return r;
3899 :
3900 33 : if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3901 0 : dep = UNIT_BINDS_TO;
3902 :
3903 33 : r = unit_add_two_dependencies(u, UNIT_AFTER,
3904 33 : MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3905 : device, true, mask);
3906 33 : if (r < 0)
3907 0 : return r;
3908 :
3909 33 : if (wants) {
3910 0 : r = unit_add_dependency(device, UNIT_WANTS, u, false, mask);
3911 0 : if (r < 0)
3912 0 : return r;
3913 : }
3914 :
3915 33 : return 0;
3916 : }
3917 :
3918 1969 : int unit_coldplug(Unit *u) {
3919 1969 : int r = 0, q;
3920 : char **i;
3921 :
3922 1969 : assert(u);
3923 :
3924 : /* Make sure we don't enter a loop, when coldplugging recursively. */
3925 1969 : if (u->coldplugged)
3926 0 : return 0;
3927 :
3928 1969 : u->coldplugged = true;
3929 :
3930 1969 : STRV_FOREACH(i, u->deserialized_refs) {
3931 0 : q = bus_unit_track_add_name(u, *i);
3932 0 : if (q < 0 && r >= 0)
3933 0 : r = q;
3934 : }
3935 1969 : u->deserialized_refs = strv_free(u->deserialized_refs);
3936 :
3937 1969 : if (UNIT_VTABLE(u)->coldplug) {
3938 1969 : q = UNIT_VTABLE(u)->coldplug(u);
3939 1969 : if (q < 0 && r >= 0)
3940 0 : r = q;
3941 : }
3942 :
3943 1969 : if (u->job) {
3944 0 : q = job_coldplug(u->job);
3945 0 : if (q < 0 && r >= 0)
3946 0 : r = q;
3947 : }
3948 :
3949 1969 : return r;
3950 : }
3951 :
3952 1969 : void unit_catchup(Unit *u) {
3953 1969 : assert(u);
3954 :
3955 1969 : if (UNIT_VTABLE(u)->catchup)
3956 1452 : UNIT_VTABLE(u)->catchup(u);
3957 1969 : }
3958 :
3959 2380 : static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3960 : struct stat st;
3961 :
3962 2380 : if (!path)
3963 2222 : return false;
3964 :
3965 : /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3966 : * are never out-of-date. */
3967 158 : if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3968 84 : return false;
3969 :
3970 74 : if (stat(path, &st) < 0)
3971 : /* What, cannot access this anymore? */
3972 0 : return true;
3973 :
3974 74 : if (path_masked)
3975 : /* For masked files check if they are still so */
3976 0 : return !null_or_empty(&st);
3977 : else
3978 : /* For non-empty files check the mtime */
3979 74 : return timespec_load(&st.st_mtim) > mtime;
3980 :
3981 : return false;
3982 : }
3983 :
3984 1190 : bool unit_need_daemon_reload(Unit *u) {
3985 1190 : _cleanup_strv_free_ char **t = NULL;
3986 : char **path;
3987 :
3988 1190 : assert(u);
3989 :
3990 : /* For unit files, we allow masking… */
3991 1190 : if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
3992 1190 : u->load_state == UNIT_MASKED))
3993 0 : return true;
3994 :
3995 : /* Source paths should not be masked… */
3996 1190 : if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
3997 0 : return true;
3998 :
3999 1190 : if (u->load_state == UNIT_LOADED)
4000 1004 : (void) unit_find_dropin_paths(u, &t);
4001 1190 : if (!strv_equal(u->dropin_paths, t))
4002 0 : return true;
4003 :
4004 : /* … any drop-ins that are masked are simply omitted from the list. */
4005 1190 : STRV_FOREACH(path, u->dropin_paths)
4006 0 : if (fragment_mtime_newer(*path, u->dropin_mtime, false))
4007 0 : return true;
4008 :
4009 1190 : return false;
4010 : }
4011 :
4012 0 : void unit_reset_failed(Unit *u) {
4013 0 : assert(u);
4014 :
4015 0 : if (UNIT_VTABLE(u)->reset_failed)
4016 0 : UNIT_VTABLE(u)->reset_failed(u);
4017 :
4018 0 : RATELIMIT_RESET(u->start_limit);
4019 0 : u->start_limit_hit = false;
4020 0 : }
4021 :
4022 1228 : Unit *unit_following(Unit *u) {
4023 1228 : assert(u);
4024 :
4025 1228 : if (UNIT_VTABLE(u)->following)
4026 828 : return UNIT_VTABLE(u)->following(u);
4027 :
4028 400 : return NULL;
4029 : }
4030 :
4031 6 : bool unit_stop_pending(Unit *u) {
4032 6 : assert(u);
4033 :
4034 : /* This call does check the current state of the unit. It's
4035 : * hence useful to be called from state change calls of the
4036 : * unit itself, where the state isn't updated yet. This is
4037 : * different from unit_inactive_or_pending() which checks both
4038 : * the current state and for a queued job. */
4039 :
4040 6 : return u->job && u->job->type == JOB_STOP;
4041 : }
4042 :
4043 0 : bool unit_inactive_or_pending(Unit *u) {
4044 0 : assert(u);
4045 :
4046 : /* Returns true if the unit is inactive or going down */
4047 :
4048 0 : if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4049 0 : return true;
4050 :
4051 0 : if (unit_stop_pending(u))
4052 0 : return true;
4053 :
4054 0 : return false;
4055 : }
4056 :
4057 30 : bool unit_active_or_pending(Unit *u) {
4058 30 : assert(u);
4059 :
4060 : /* Returns true if the unit is active or going up */
4061 :
4062 30 : if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4063 0 : return true;
4064 :
4065 30 : if (u->job &&
4066 0 : IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4067 0 : return true;
4068 :
4069 30 : return false;
4070 : }
4071 :
4072 0 : bool unit_will_restart(Unit *u) {
4073 0 : assert(u);
4074 :
4075 0 : if (!UNIT_VTABLE(u)->will_restart)
4076 0 : return false;
4077 :
4078 0 : return UNIT_VTABLE(u)->will_restart(u);
4079 : }
4080 :
4081 0 : int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4082 0 : assert(u);
4083 0 : assert(w >= 0 && w < _KILL_WHO_MAX);
4084 0 : assert(SIGNAL_VALID(signo));
4085 :
4086 0 : if (!UNIT_VTABLE(u)->kill)
4087 0 : return -EOPNOTSUPP;
4088 :
4089 0 : return UNIT_VTABLE(u)->kill(u, w, signo, error);
4090 : }
4091 :
4092 0 : static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4093 0 : _cleanup_set_free_ Set *pid_set = NULL;
4094 : int r;
4095 :
4096 0 : pid_set = set_new(NULL);
4097 0 : if (!pid_set)
4098 0 : return NULL;
4099 :
4100 : /* Exclude the main/control pids from being killed via the cgroup */
4101 0 : if (main_pid > 0) {
4102 0 : r = set_put(pid_set, PID_TO_PTR(main_pid));
4103 0 : if (r < 0)
4104 0 : return NULL;
4105 : }
4106 :
4107 0 : if (control_pid > 0) {
4108 0 : r = set_put(pid_set, PID_TO_PTR(control_pid));
4109 0 : if (r < 0)
4110 0 : return NULL;
4111 : }
4112 :
4113 0 : return TAKE_PTR(pid_set);
4114 : }
4115 :
4116 0 : int unit_kill_common(
4117 : Unit *u,
4118 : KillWho who,
4119 : int signo,
4120 : pid_t main_pid,
4121 : pid_t control_pid,
4122 : sd_bus_error *error) {
4123 :
4124 0 : int r = 0;
4125 0 : bool killed = false;
4126 :
4127 0 : if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4128 0 : if (main_pid < 0)
4129 0 : return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4130 0 : else if (main_pid == 0)
4131 0 : return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4132 : }
4133 :
4134 0 : if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4135 0 : if (control_pid < 0)
4136 0 : return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4137 0 : else if (control_pid == 0)
4138 0 : return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4139 : }
4140 :
4141 0 : if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4142 0 : if (control_pid > 0) {
4143 0 : if (kill(control_pid, signo) < 0)
4144 0 : r = -errno;
4145 : else
4146 0 : killed = true;
4147 : }
4148 :
4149 0 : if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4150 0 : if (main_pid > 0) {
4151 0 : if (kill(main_pid, signo) < 0)
4152 0 : r = -errno;
4153 : else
4154 0 : killed = true;
4155 : }
4156 :
4157 0 : if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4158 0 : _cleanup_set_free_ Set *pid_set = NULL;
4159 : int q;
4160 :
4161 : /* Exclude the main/control pids from being killed via the cgroup */
4162 0 : pid_set = unit_pid_set(main_pid, control_pid);
4163 0 : if (!pid_set)
4164 0 : return -ENOMEM;
4165 :
4166 0 : q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4167 0 : if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4168 0 : r = q;
4169 : else
4170 0 : killed = true;
4171 : }
4172 :
4173 0 : if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4174 0 : return -ESRCH;
4175 :
4176 0 : return r;
4177 : }
4178 :
4179 1372 : int unit_following_set(Unit *u, Set **s) {
4180 1372 : assert(u);
4181 1372 : assert(s);
4182 :
4183 1372 : if (UNIT_VTABLE(u)->following_set)
4184 828 : return UNIT_VTABLE(u)->following_set(u, s);
4185 :
4186 544 : *s = NULL;
4187 544 : return 0;
4188 : }
4189 :
4190 0 : UnitFileState unit_get_unit_file_state(Unit *u) {
4191 : int r;
4192 :
4193 0 : assert(u);
4194 :
4195 0 : if (u->unit_file_state < 0 && u->fragment_path) {
4196 0 : r = unit_file_get_state(
4197 0 : u->manager->unit_file_scope,
4198 : NULL,
4199 0 : u->id,
4200 : &u->unit_file_state);
4201 0 : if (r < 0)
4202 0 : u->unit_file_state = UNIT_FILE_BAD;
4203 : }
4204 :
4205 0 : return u->unit_file_state;
4206 : }
4207 :
4208 0 : int unit_get_unit_file_preset(Unit *u) {
4209 0 : assert(u);
4210 :
4211 0 : if (u->unit_file_preset < 0 && u->fragment_path)
4212 0 : u->unit_file_preset = unit_file_query_preset(
4213 0 : u->manager->unit_file_scope,
4214 : NULL,
4215 0 : basename(u->fragment_path));
4216 :
4217 0 : return u->unit_file_preset;
4218 : }
4219 :
4220 282 : Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4221 282 : assert(ref);
4222 282 : assert(source);
4223 282 : assert(target);
4224 :
4225 282 : if (ref->target)
4226 0 : unit_ref_unset(ref);
4227 :
4228 282 : ref->source = source;
4229 282 : ref->target = target;
4230 282 : LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4231 282 : return target;
4232 : }
4233 :
4234 2305 : void unit_ref_unset(UnitRef *ref) {
4235 2305 : assert(ref);
4236 :
4237 2305 : if (!ref->target)
4238 2023 : return;
4239 :
4240 : /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4241 : * be unreferenced now. */
4242 282 : unit_add_to_gc_queue(ref->target);
4243 :
4244 282 : LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4245 282 : ref->source = ref->target = NULL;
4246 : }
4247 :
4248 0 : static int user_from_unit_name(Unit *u, char **ret) {
4249 :
4250 : static const uint8_t hash_key[] = {
4251 : 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4252 : 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4253 : };
4254 :
4255 0 : _cleanup_free_ char *n = NULL;
4256 : int r;
4257 :
4258 0 : r = unit_name_to_prefix(u->id, &n);
4259 0 : if (r < 0)
4260 0 : return r;
4261 :
4262 0 : if (valid_user_group_name(n)) {
4263 0 : *ret = TAKE_PTR(n);
4264 0 : return 0;
4265 : }
4266 :
4267 : /* If we can't use the unit name as a user name, then let's hash it and use that */
4268 0 : if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4269 0 : return -ENOMEM;
4270 :
4271 0 : return 0;
4272 : }
4273 :
4274 304 : int unit_patch_contexts(Unit *u) {
4275 : CGroupContext *cc;
4276 : ExecContext *ec;
4277 : unsigned i;
4278 : int r;
4279 :
4280 304 : assert(u);
4281 :
4282 : /* Patch in the manager defaults into the exec and cgroup
4283 : * contexts, _after_ the rest of the settings have been
4284 : * initialized */
4285 :
4286 304 : ec = unit_get_exec_context(u);
4287 304 : if (ec) {
4288 : /* This only copies in the ones that need memory */
4289 4675 : for (i = 0; i < _RLIMIT_MAX; i++)
4290 4400 : if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4291 0 : ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4292 0 : if (!ec->rlimit[i])
4293 0 : return -ENOMEM;
4294 : }
4295 :
4296 275 : if (MANAGER_IS_USER(u->manager) &&
4297 275 : !ec->working_directory) {
4298 :
4299 264 : r = get_home_dir(&ec->working_directory);
4300 264 : if (r < 0)
4301 0 : return r;
4302 :
4303 : /* Allow user services to run, even if the
4304 : * home directory is missing */
4305 264 : ec->working_directory_missing_ok = true;
4306 : }
4307 :
4308 275 : if (ec->private_devices)
4309 0 : ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4310 :
4311 275 : if (ec->protect_kernel_modules)
4312 0 : ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4313 :
4314 275 : if (ec->dynamic_user) {
4315 0 : if (!ec->user) {
4316 0 : r = user_from_unit_name(u, &ec->user);
4317 0 : if (r < 0)
4318 0 : return r;
4319 : }
4320 :
4321 0 : if (!ec->group) {
4322 0 : ec->group = strdup(ec->user);
4323 0 : if (!ec->group)
4324 0 : return -ENOMEM;
4325 : }
4326 :
4327 : /* If the dynamic user option is on, let's make sure that the unit can't leave its
4328 : * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4329 : * sandbox. */
4330 :
4331 0 : ec->private_tmp = true;
4332 0 : ec->remove_ipc = true;
4333 0 : ec->protect_system = PROTECT_SYSTEM_STRICT;
4334 0 : if (ec->protect_home == PROTECT_HOME_NO)
4335 0 : ec->protect_home = PROTECT_HOME_READ_ONLY;
4336 :
4337 : /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4338 : * them. */
4339 0 : ec->no_new_privileges = true;
4340 0 : ec->restrict_suid_sgid = true;
4341 : }
4342 : }
4343 :
4344 304 : cc = unit_get_cgroup_context(u);
4345 304 : if (cc && ec) {
4346 :
4347 275 : if (ec->private_devices &&
4348 0 : cc->device_policy == CGROUP_AUTO)
4349 0 : cc->device_policy = CGROUP_CLOSED;
4350 :
4351 275 : if (ec->root_image &&
4352 0 : (cc->device_policy != CGROUP_AUTO || cc->device_allow)) {
4353 :
4354 : /* When RootImage= is specified, the following devices are touched. */
4355 0 : r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4356 0 : if (r < 0)
4357 0 : return r;
4358 :
4359 0 : r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4360 0 : if (r < 0)
4361 0 : return r;
4362 :
4363 0 : r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4364 0 : if (r < 0)
4365 0 : return r;
4366 : }
4367 : }
4368 :
4369 304 : return 0;
4370 : }
4371 :
4372 6932 : ExecContext *unit_get_exec_context(Unit *u) {
4373 : size_t offset;
4374 6932 : assert(u);
4375 :
4376 6932 : if (u->type < 0)
4377 0 : return NULL;
4378 :
4379 6932 : offset = UNIT_VTABLE(u)->exec_context_offset;
4380 6932 : if (offset <= 0)
4381 4814 : return NULL;
4382 :
4383 2118 : return (ExecContext*) ((uint8_t*) u + offset);
4384 : }
4385 :
4386 2158 : KillContext *unit_get_kill_context(Unit *u) {
4387 : size_t offset;
4388 2158 : assert(u);
4389 :
4390 2158 : if (u->type < 0)
4391 0 : return NULL;
4392 :
4393 2158 : offset = UNIT_VTABLE(u)->kill_context_offset;
4394 2158 : if (offset <= 0)
4395 1588 : return NULL;
4396 :
4397 570 : return (KillContext*) ((uint8_t*) u + offset);
4398 : }
4399 :
4400 11377 : CGroupContext *unit_get_cgroup_context(Unit *u) {
4401 : size_t offset;
4402 :
4403 11377 : if (u->type < 0)
4404 0 : return NULL;
4405 :
4406 11377 : offset = UNIT_VTABLE(u)->cgroup_context_offset;
4407 11377 : if (offset <= 0)
4408 5605 : return NULL;
4409 :
4410 5772 : return (CGroupContext*) ((uint8_t*) u + offset);
4411 : }
4412 :
4413 0 : ExecRuntime *unit_get_exec_runtime(Unit *u) {
4414 : size_t offset;
4415 :
4416 0 : if (u->type < 0)
4417 0 : return NULL;
4418 :
4419 0 : offset = UNIT_VTABLE(u)->exec_runtime_offset;
4420 0 : if (offset <= 0)
4421 0 : return NULL;
4422 :
4423 0 : return *(ExecRuntime**) ((uint8_t*) u + offset);
4424 : }
4425 :
4426 0 : static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4427 0 : assert(u);
4428 :
4429 0 : if (UNIT_WRITE_FLAGS_NOOP(flags))
4430 0 : return NULL;
4431 :
4432 0 : if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4433 0 : return u->manager->lookup_paths.transient;
4434 :
4435 0 : if (flags & UNIT_PERSISTENT)
4436 0 : return u->manager->lookup_paths.persistent_control;
4437 :
4438 0 : if (flags & UNIT_RUNTIME)
4439 0 : return u->manager->lookup_paths.runtime_control;
4440 :
4441 0 : return NULL;
4442 : }
4443 :
4444 0 : char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4445 0 : char *ret = NULL;
4446 :
4447 0 : if (!s)
4448 0 : return NULL;
4449 :
4450 : /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4451 : * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4452 : * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4453 : * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4454 : * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4455 : * allocations. */
4456 :
4457 0 : if (flags & UNIT_ESCAPE_SPECIFIERS) {
4458 0 : ret = specifier_escape(s);
4459 0 : if (!ret)
4460 0 : return NULL;
4461 :
4462 0 : s = ret;
4463 : }
4464 :
4465 0 : if (flags & UNIT_ESCAPE_C) {
4466 : char *a;
4467 :
4468 0 : a = cescape(s);
4469 0 : free(ret);
4470 0 : if (!a)
4471 0 : return NULL;
4472 :
4473 0 : ret = a;
4474 : }
4475 :
4476 0 : if (buf) {
4477 0 : *buf = ret;
4478 0 : return ret ?: (char*) s;
4479 : }
4480 :
4481 0 : return ret ?: strdup(s);
4482 : }
4483 :
4484 0 : char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4485 0 : _cleanup_free_ char *result = NULL;
4486 0 : size_t n = 0, allocated = 0;
4487 : char **i;
4488 :
4489 : /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4490 : * way suitable for ExecStart= stanzas */
4491 :
4492 0 : STRV_FOREACH(i, l) {
4493 0 : _cleanup_free_ char *buf = NULL;
4494 : const char *p;
4495 : size_t a;
4496 : char *q;
4497 :
4498 0 : p = unit_escape_setting(*i, flags, &buf);
4499 0 : if (!p)
4500 0 : return NULL;
4501 :
4502 0 : a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4503 0 : if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4504 0 : return NULL;
4505 :
4506 0 : q = result + n;
4507 0 : if (n > 0)
4508 0 : *(q++) = ' ';
4509 :
4510 0 : *(q++) = '"';
4511 0 : q = stpcpy(q, p);
4512 0 : *(q++) = '"';
4513 :
4514 0 : n += a;
4515 : }
4516 :
4517 0 : if (!GREEDY_REALLOC(result, allocated, n + 1))
4518 0 : return NULL;
4519 :
4520 0 : result[n] = 0;
4521 :
4522 0 : return TAKE_PTR(result);
4523 : }
4524 :
4525 0 : int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4526 0 : _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4527 : const char *dir, *wrapped;
4528 : int r;
4529 :
4530 0 : assert(u);
4531 0 : assert(name);
4532 0 : assert(data);
4533 :
4534 0 : if (UNIT_WRITE_FLAGS_NOOP(flags))
4535 0 : return 0;
4536 :
4537 0 : data = unit_escape_setting(data, flags, &escaped);
4538 0 : if (!data)
4539 0 : return -ENOMEM;
4540 :
4541 : /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4542 : * previous section header is the same */
4543 :
4544 0 : if (flags & UNIT_PRIVATE) {
4545 0 : if (!UNIT_VTABLE(u)->private_section)
4546 0 : return -EINVAL;
4547 :
4548 0 : if (!u->transient_file || u->last_section_private < 0)
4549 0 : data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4550 0 : else if (u->last_section_private == 0)
4551 0 : data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4552 : } else {
4553 0 : if (!u->transient_file || u->last_section_private < 0)
4554 0 : data = strjoina("[Unit]\n", data);
4555 0 : else if (u->last_section_private > 0)
4556 0 : data = strjoina("\n[Unit]\n", data);
4557 : }
4558 :
4559 0 : if (u->transient_file) {
4560 : /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4561 : * write to the transient unit file. */
4562 0 : fputs(data, u->transient_file);
4563 :
4564 0 : if (!endswith(data, "\n"))
4565 0 : fputc('\n', u->transient_file);
4566 :
4567 : /* Remember which section we wrote this entry to */
4568 0 : u->last_section_private = !!(flags & UNIT_PRIVATE);
4569 0 : return 0;
4570 : }
4571 :
4572 0 : dir = unit_drop_in_dir(u, flags);
4573 0 : if (!dir)
4574 0 : return -EINVAL;
4575 :
4576 0 : wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4577 : "# or an equivalent operation. Do not edit.\n",
4578 : data,
4579 : "\n");
4580 :
4581 0 : r = drop_in_file(dir, u->id, 50, name, &p, &q);
4582 0 : if (r < 0)
4583 0 : return r;
4584 :
4585 0 : (void) mkdir_p_label(p, 0755);
4586 :
4587 : /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4588 : * recreate the cache after every drop-in we write. */
4589 0 : if (u->manager->unit_path_cache) {
4590 0 : r = set_put_strdup(u->manager->unit_path_cache, p);
4591 0 : if (r < 0)
4592 0 : return r;
4593 : }
4594 :
4595 0 : r = write_string_file_atomic_label(q, wrapped);
4596 0 : if (r < 0)
4597 0 : return r;
4598 :
4599 0 : r = strv_push(&u->dropin_paths, q);
4600 0 : if (r < 0)
4601 0 : return r;
4602 0 : q = NULL;
4603 :
4604 0 : strv_uniq(u->dropin_paths);
4605 :
4606 0 : u->dropin_mtime = now(CLOCK_REALTIME);
4607 :
4608 0 : return 0;
4609 : }
4610 :
4611 0 : int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4612 0 : _cleanup_free_ char *p = NULL;
4613 : va_list ap;
4614 : int r;
4615 :
4616 0 : assert(u);
4617 0 : assert(name);
4618 0 : assert(format);
4619 :
4620 0 : if (UNIT_WRITE_FLAGS_NOOP(flags))
4621 0 : return 0;
4622 :
4623 0 : va_start(ap, format);
4624 0 : r = vasprintf(&p, format, ap);
4625 0 : va_end(ap);
4626 :
4627 0 : if (r < 0)
4628 0 : return -ENOMEM;
4629 :
4630 0 : return unit_write_setting(u, flags, name, p);
4631 : }
4632 :
4633 0 : int unit_make_transient(Unit *u) {
4634 0 : _cleanup_free_ char *path = NULL;
4635 : FILE *f;
4636 :
4637 0 : assert(u);
4638 :
4639 0 : if (!UNIT_VTABLE(u)->can_transient)
4640 0 : return -EOPNOTSUPP;
4641 :
4642 0 : (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4643 :
4644 0 : path = path_join(u->manager->lookup_paths.transient, u->id);
4645 0 : if (!path)
4646 0 : return -ENOMEM;
4647 :
4648 : /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4649 : * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4650 :
4651 0 : RUN_WITH_UMASK(0022) {
4652 0 : f = fopen(path, "we");
4653 0 : if (!f)
4654 0 : return -errno;
4655 : }
4656 :
4657 0 : safe_fclose(u->transient_file);
4658 0 : u->transient_file = f;
4659 :
4660 0 : free_and_replace(u->fragment_path, path);
4661 :
4662 0 : u->source_path = mfree(u->source_path);
4663 0 : u->dropin_paths = strv_free(u->dropin_paths);
4664 0 : u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4665 :
4666 0 : u->load_state = UNIT_STUB;
4667 0 : u->load_error = 0;
4668 0 : u->transient = true;
4669 :
4670 0 : unit_add_to_dbus_queue(u);
4671 0 : unit_add_to_gc_queue(u);
4672 :
4673 0 : fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4674 : u->transient_file);
4675 :
4676 0 : return 0;
4677 : }
4678 :
4679 0 : static int log_kill(pid_t pid, int sig, void *userdata) {
4680 0 : _cleanup_free_ char *comm = NULL;
4681 :
4682 0 : (void) get_process_comm(pid, &comm);
4683 :
4684 : /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4685 : only, like for example systemd's own PAM stub process. */
4686 0 : if (comm && comm[0] == '(')
4687 0 : return 0;
4688 :
4689 0 : log_unit_notice(userdata,
4690 : "Killing process " PID_FMT " (%s) with signal SIG%s.",
4691 : pid,
4692 : strna(comm),
4693 : signal_to_string(sig));
4694 :
4695 0 : return 1;
4696 : }
4697 :
4698 0 : static int operation_to_signal(KillContext *c, KillOperation k) {
4699 0 : assert(c);
4700 :
4701 0 : switch (k) {
4702 :
4703 0 : case KILL_TERMINATE:
4704 : case KILL_TERMINATE_AND_LOG:
4705 0 : return c->kill_signal;
4706 :
4707 0 : case KILL_KILL:
4708 0 : return c->final_kill_signal;
4709 :
4710 0 : case KILL_WATCHDOG:
4711 0 : return c->watchdog_signal;
4712 :
4713 0 : default:
4714 0 : assert_not_reached("KillOperation unknown");
4715 : }
4716 : }
4717 :
4718 0 : int unit_kill_context(
4719 : Unit *u,
4720 : KillContext *c,
4721 : KillOperation k,
4722 : pid_t main_pid,
4723 : pid_t control_pid,
4724 : bool main_pid_alien) {
4725 :
4726 0 : bool wait_for_exit = false, send_sighup;
4727 0 : cg_kill_log_func_t log_func = NULL;
4728 : int sig, r;
4729 :
4730 0 : assert(u);
4731 0 : assert(c);
4732 :
4733 : /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4734 : * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4735 :
4736 0 : if (c->kill_mode == KILL_NONE)
4737 0 : return 0;
4738 :
4739 0 : sig = operation_to_signal(c, k);
4740 :
4741 0 : send_sighup =
4742 0 : c->send_sighup &&
4743 0 : IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4744 : sig != SIGHUP;
4745 :
4746 0 : if (k != KILL_TERMINATE || IN_SET(sig, SIGKILL, SIGABRT))
4747 0 : log_func = log_kill;
4748 :
4749 0 : if (main_pid > 0) {
4750 0 : if (log_func)
4751 0 : log_func(main_pid, sig, u);
4752 :
4753 0 : r = kill_and_sigcont(main_pid, sig);
4754 0 : if (r < 0 && r != -ESRCH) {
4755 0 : _cleanup_free_ char *comm = NULL;
4756 0 : (void) get_process_comm(main_pid, &comm);
4757 :
4758 0 : log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4759 : } else {
4760 0 : if (!main_pid_alien)
4761 0 : wait_for_exit = true;
4762 :
4763 0 : if (r != -ESRCH && send_sighup)
4764 0 : (void) kill(main_pid, SIGHUP);
4765 : }
4766 : }
4767 :
4768 0 : if (control_pid > 0) {
4769 0 : if (log_func)
4770 0 : log_func(control_pid, sig, u);
4771 :
4772 0 : r = kill_and_sigcont(control_pid, sig);
4773 0 : if (r < 0 && r != -ESRCH) {
4774 0 : _cleanup_free_ char *comm = NULL;
4775 0 : (void) get_process_comm(control_pid, &comm);
4776 :
4777 0 : log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4778 : } else {
4779 0 : wait_for_exit = true;
4780 :
4781 0 : if (r != -ESRCH && send_sighup)
4782 0 : (void) kill(control_pid, SIGHUP);
4783 : }
4784 : }
4785 :
4786 0 : if (u->cgroup_path &&
4787 0 : (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4788 0 : _cleanup_set_free_ Set *pid_set = NULL;
4789 :
4790 : /* Exclude the main/control pids from being killed via the cgroup */
4791 0 : pid_set = unit_pid_set(main_pid, control_pid);
4792 0 : if (!pid_set)
4793 0 : return -ENOMEM;
4794 :
4795 0 : r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4796 : sig,
4797 : CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4798 : pid_set,
4799 : log_func, u);
4800 0 : if (r < 0) {
4801 0 : if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4802 0 : log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4803 :
4804 0 : } else if (r > 0) {
4805 :
4806 : /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4807 : * we are running in a container or if this is a delegation unit, simply because cgroup
4808 : * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4809 : * of containers it can be confused easily by left-over directories in the cgroup — which
4810 : * however should not exist in non-delegated units. On the unified hierarchy that's different,
4811 : * there we get proper events. Hence rely on them. */
4812 :
4813 0 : if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4814 0 : (detect_container() == 0 && !unit_cgroup_delegate(u)))
4815 0 : wait_for_exit = true;
4816 :
4817 0 : if (send_sighup) {
4818 0 : set_free(pid_set);
4819 :
4820 0 : pid_set = unit_pid_set(main_pid, control_pid);
4821 0 : if (!pid_set)
4822 0 : return -ENOMEM;
4823 :
4824 0 : cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4825 : SIGHUP,
4826 : CGROUP_IGNORE_SELF,
4827 : pid_set,
4828 : NULL, NULL);
4829 : }
4830 : }
4831 : }
4832 :
4833 0 : return wait_for_exit;
4834 : }
4835 :
4836 249 : int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4837 249 : _cleanup_free_ char *p = NULL;
4838 : UnitDependencyInfo di;
4839 : int r;
4840 :
4841 249 : assert(u);
4842 249 : assert(path);
4843 :
4844 : /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4845 : * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4846 : * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4847 : * determine which units to make themselves a dependency of. */
4848 :
4849 249 : if (!path_is_absolute(path))
4850 0 : return -EINVAL;
4851 :
4852 249 : r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4853 249 : if (r < 0)
4854 0 : return r;
4855 :
4856 249 : p = strdup(path);
4857 249 : if (!p)
4858 0 : return -ENOMEM;
4859 :
4860 249 : path = path_simplify(p, true);
4861 :
4862 249 : if (!path_is_normalized(path))
4863 0 : return -EPERM;
4864 :
4865 249 : if (hashmap_contains(u->requires_mounts_for, path))
4866 0 : return 0;
4867 :
4868 249 : di = (UnitDependencyInfo) {
4869 : .origin_mask = mask
4870 : };
4871 :
4872 249 : r = hashmap_put(u->requires_mounts_for, path, di.data);
4873 249 : if (r < 0)
4874 0 : return r;
4875 249 : p = NULL;
4876 :
4877 249 : char prefix[strlen(path) + 1];
4878 1051 : PATH_FOREACH_PREFIX_MORE(prefix, path) {
4879 : Set *x;
4880 :
4881 802 : x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4882 802 : if (!x) {
4883 322 : _cleanup_free_ char *q = NULL;
4884 :
4885 322 : r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4886 322 : if (r < 0)
4887 0 : return r;
4888 :
4889 322 : q = strdup(prefix);
4890 322 : if (!q)
4891 0 : return -ENOMEM;
4892 :
4893 322 : x = set_new(NULL);
4894 322 : if (!x)
4895 0 : return -ENOMEM;
4896 :
4897 322 : r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4898 322 : if (r < 0) {
4899 0 : set_free(x);
4900 0 : return r;
4901 : }
4902 322 : q = NULL;
4903 : }
4904 :
4905 802 : r = set_put(x, u);
4906 802 : if (r < 0)
4907 0 : return r;
4908 : }
4909 :
4910 249 : return 0;
4911 : }
4912 :
4913 237 : int unit_setup_exec_runtime(Unit *u) {
4914 : ExecRuntime **rt;
4915 : size_t offset;
4916 : Unit *other;
4917 : Iterator i;
4918 : void *v;
4919 : int r;
4920 :
4921 237 : offset = UNIT_VTABLE(u)->exec_runtime_offset;
4922 237 : assert(offset > 0);
4923 :
4924 : /* Check if there already is an ExecRuntime for this unit? */
4925 237 : rt = (ExecRuntime**) ((uint8_t*) u + offset);
4926 237 : if (*rt)
4927 0 : return 0;
4928 :
4929 : /* Try to get it from somebody else */
4930 237 : HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4931 0 : r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4932 0 : if (r == 1)
4933 0 : return 1;
4934 : }
4935 :
4936 237 : return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4937 : }
4938 :
4939 237 : int unit_setup_dynamic_creds(Unit *u) {
4940 : ExecContext *ec;
4941 : DynamicCreds *dcreds;
4942 : size_t offset;
4943 :
4944 237 : assert(u);
4945 :
4946 237 : offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4947 237 : assert(offset > 0);
4948 237 : dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4949 :
4950 237 : ec = unit_get_exec_context(u);
4951 237 : assert(ec);
4952 :
4953 237 : if (!ec->dynamic_user)
4954 237 : return 0;
4955 :
4956 0 : return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4957 : }
4958 :
4959 308 : bool unit_type_supported(UnitType t) {
4960 308 : if (_unlikely_(t < 0))
4961 0 : return false;
4962 308 : if (_unlikely_(t >= _UNIT_TYPE_MAX))
4963 0 : return false;
4964 :
4965 308 : if (!unit_vtable[t]->supported)
4966 209 : return true;
4967 :
4968 99 : return unit_vtable[t]->supported();
4969 : }
4970 :
4971 0 : void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4972 : int r;
4973 :
4974 0 : assert(u);
4975 0 : assert(where);
4976 :
4977 0 : r = dir_is_empty(where);
4978 0 : if (r > 0 || r == -ENOTDIR)
4979 0 : return;
4980 0 : if (r < 0) {
4981 0 : log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4982 0 : return;
4983 : }
4984 :
4985 0 : log_struct(LOG_NOTICE,
4986 : "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4987 : LOG_UNIT_ID(u),
4988 : LOG_UNIT_INVOCATION_ID(u),
4989 : LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4990 : "WHERE=%s", where);
4991 : }
4992 :
4993 0 : int unit_fail_if_noncanonical(Unit *u, const char* where) {
4994 0 : _cleanup_free_ char *canonical_where = NULL;
4995 : int r;
4996 :
4997 0 : assert(u);
4998 0 : assert(where);
4999 :
5000 0 : r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where);
5001 0 : if (r < 0) {
5002 0 : log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5003 0 : return 0;
5004 : }
5005 :
5006 : /* We will happily ignore a trailing slash (or any redundant slashes) */
5007 0 : if (path_equal(where, canonical_where))
5008 0 : return 0;
5009 :
5010 : /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5011 0 : log_struct(LOG_ERR,
5012 : "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5013 : LOG_UNIT_ID(u),
5014 : LOG_UNIT_INVOCATION_ID(u),
5015 : LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5016 : "WHERE=%s", where);
5017 :
5018 0 : return -ELOOP;
5019 : }
5020 :
5021 0 : bool unit_is_pristine(Unit *u) {
5022 0 : assert(u);
5023 :
5024 : /* Check if the unit already exists or is already around,
5025 : * in a number of different ways. Note that to cater for unit
5026 : * types such as slice, we are generally fine with units that
5027 : * are marked UNIT_LOADED even though nothing was actually
5028 : * loaded, as those unit types don't require a file on disk. */
5029 :
5030 0 : return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
5031 0 : u->fragment_path ||
5032 0 : u->source_path ||
5033 0 : !strv_isempty(u->dropin_paths) ||
5034 0 : u->job ||
5035 0 : u->merged_into);
5036 : }
5037 :
5038 0 : pid_t unit_control_pid(Unit *u) {
5039 0 : assert(u);
5040 :
5041 0 : if (UNIT_VTABLE(u)->control_pid)
5042 0 : return UNIT_VTABLE(u)->control_pid(u);
5043 :
5044 0 : return 0;
5045 : }
5046 :
5047 0 : pid_t unit_main_pid(Unit *u) {
5048 0 : assert(u);
5049 :
5050 0 : if (UNIT_VTABLE(u)->main_pid)
5051 0 : return UNIT_VTABLE(u)->main_pid(u);
5052 :
5053 0 : return 0;
5054 : }
5055 :
5056 4320 : static void unit_unref_uid_internal(
5057 : Unit *u,
5058 : uid_t *ref_uid,
5059 : bool destroy_now,
5060 : void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5061 :
5062 4320 : assert(u);
5063 4320 : assert(ref_uid);
5064 4320 : assert(_manager_unref_uid);
5065 :
5066 : /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5067 : * gid_t are actually the same time, with the same validity rules.
5068 : *
5069 : * Drops a reference to UID/GID from a unit. */
5070 :
5071 : assert_cc(sizeof(uid_t) == sizeof(gid_t));
5072 : assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5073 :
5074 4320 : if (!uid_is_valid(*ref_uid))
5075 4320 : return;
5076 :
5077 0 : _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5078 0 : *ref_uid = UID_INVALID;
5079 : }
5080 :
5081 2160 : void unit_unref_uid(Unit *u, bool destroy_now) {
5082 2160 : unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5083 2160 : }
5084 :
5085 2160 : void unit_unref_gid(Unit *u, bool destroy_now) {
5086 2160 : unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5087 2160 : }
5088 :
5089 0 : static int unit_ref_uid_internal(
5090 : Unit *u,
5091 : uid_t *ref_uid,
5092 : uid_t uid,
5093 : bool clean_ipc,
5094 : int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5095 :
5096 : int r;
5097 :
5098 0 : assert(u);
5099 0 : assert(ref_uid);
5100 0 : assert(uid_is_valid(uid));
5101 0 : assert(_manager_ref_uid);
5102 :
5103 : /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5104 : * are actually the same type, and have the same validity rules.
5105 : *
5106 : * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5107 : * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5108 : * drops to zero. */
5109 :
5110 : assert_cc(sizeof(uid_t) == sizeof(gid_t));
5111 : assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5112 :
5113 0 : if (*ref_uid == uid)
5114 0 : return 0;
5115 :
5116 0 : if (uid_is_valid(*ref_uid)) /* Already set? */
5117 0 : return -EBUSY;
5118 :
5119 0 : r = _manager_ref_uid(u->manager, uid, clean_ipc);
5120 0 : if (r < 0)
5121 0 : return r;
5122 :
5123 0 : *ref_uid = uid;
5124 0 : return 1;
5125 : }
5126 :
5127 0 : int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5128 0 : return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5129 : }
5130 :
5131 0 : int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5132 0 : return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5133 : }
5134 :
5135 0 : static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5136 0 : int r = 0, q = 0;
5137 :
5138 0 : assert(u);
5139 :
5140 : /* Reference both a UID and a GID in one go. Either references both, or neither. */
5141 :
5142 0 : if (uid_is_valid(uid)) {
5143 0 : r = unit_ref_uid(u, uid, clean_ipc);
5144 0 : if (r < 0)
5145 0 : return r;
5146 : }
5147 :
5148 0 : if (gid_is_valid(gid)) {
5149 0 : q = unit_ref_gid(u, gid, clean_ipc);
5150 0 : if (q < 0) {
5151 0 : if (r > 0)
5152 0 : unit_unref_uid(u, false);
5153 :
5154 0 : return q;
5155 : }
5156 : }
5157 :
5158 0 : return r > 0 || q > 0;
5159 : }
5160 :
5161 0 : int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5162 : ExecContext *c;
5163 : int r;
5164 :
5165 0 : assert(u);
5166 :
5167 0 : c = unit_get_exec_context(u);
5168 :
5169 0 : r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5170 0 : if (r < 0)
5171 0 : return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5172 :
5173 0 : return r;
5174 : }
5175 :
5176 2160 : void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5177 2160 : assert(u);
5178 :
5179 2160 : unit_unref_uid(u, destroy_now);
5180 2160 : unit_unref_gid(u, destroy_now);
5181 2160 : }
5182 :
5183 0 : void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5184 : int r;
5185 :
5186 0 : assert(u);
5187 :
5188 : /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5189 : * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5190 : * objects when no service references the UID/GID anymore. */
5191 :
5192 0 : r = unit_ref_uid_gid(u, uid, gid);
5193 0 : if (r > 0)
5194 0 : unit_add_to_dbus_queue(u);
5195 0 : }
5196 :
5197 1483 : int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5198 : int r;
5199 :
5200 1483 : assert(u);
5201 :
5202 : /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5203 :
5204 1483 : if (sd_id128_equal(u->invocation_id, id))
5205 0 : return 0;
5206 :
5207 1483 : if (!sd_id128_is_null(u->invocation_id))
5208 0 : (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5209 :
5210 1483 : if (sd_id128_is_null(id)) {
5211 0 : r = 0;
5212 0 : goto reset;
5213 : }
5214 :
5215 1483 : r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5216 1483 : if (r < 0)
5217 0 : goto reset;
5218 :
5219 1483 : u->invocation_id = id;
5220 1483 : sd_id128_to_string(id, u->invocation_id_string);
5221 :
5222 1483 : r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5223 1483 : if (r < 0)
5224 0 : goto reset;
5225 :
5226 1483 : return 0;
5227 :
5228 0 : reset:
5229 0 : u->invocation_id = SD_ID128_NULL;
5230 0 : u->invocation_id_string[0] = 0;
5231 0 : return r;
5232 : }
5233 :
5234 1483 : int unit_acquire_invocation_id(Unit *u) {
5235 : sd_id128_t id;
5236 : int r;
5237 :
5238 1483 : assert(u);
5239 :
5240 1483 : r = sd_id128_randomize(&id);
5241 1483 : if (r < 0)
5242 0 : return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5243 :
5244 1483 : r = unit_set_invocation_id(u, id);
5245 1483 : if (r < 0)
5246 0 : return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5247 :
5248 1483 : unit_add_to_dbus_queue(u);
5249 1483 : return 0;
5250 : }
5251 :
5252 6 : int unit_set_exec_params(Unit *u, ExecParameters *p) {
5253 : int r;
5254 :
5255 6 : assert(u);
5256 6 : assert(p);
5257 :
5258 : /* Copy parameters from manager */
5259 6 : r = manager_get_effective_environment(u->manager, &p->environment);
5260 6 : if (r < 0)
5261 0 : return r;
5262 :
5263 6 : p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5264 6 : p->cgroup_supported = u->manager->cgroup_supported;
5265 6 : p->prefix = u->manager->prefix;
5266 6 : SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5267 :
5268 : /* Copy parameters from unit */
5269 6 : p->cgroup_path = u->cgroup_path;
5270 6 : SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5271 :
5272 6 : return 0;
5273 : }
5274 :
5275 0 : int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5276 : int r;
5277 :
5278 0 : assert(u);
5279 0 : assert(ret);
5280 :
5281 : /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5282 : * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5283 :
5284 0 : (void) unit_realize_cgroup(u);
5285 :
5286 0 : r = safe_fork(name, FORK_REOPEN_LOG, ret);
5287 0 : if (r != 0)
5288 0 : return r;
5289 :
5290 0 : (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5291 0 : (void) ignore_signals(SIGPIPE, -1);
5292 :
5293 0 : (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5294 :
5295 0 : if (u->cgroup_path) {
5296 0 : r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5297 0 : if (r < 0) {
5298 0 : log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5299 0 : _exit(EXIT_CGROUP);
5300 : }
5301 : }
5302 :
5303 0 : return 0;
5304 : }
5305 :
5306 8 : static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5307 8 : assert(u);
5308 8 : assert(d >= 0);
5309 8 : assert(d < _UNIT_DEPENDENCY_MAX);
5310 8 : assert(other);
5311 :
5312 8 : if (di.origin_mask == 0 && di.destination_mask == 0) {
5313 : /* No bit set anymore, let's drop the whole entry */
5314 6 : assert_se(hashmap_remove(u->dependencies[d], other));
5315 6 : log_unit_debug(u, "%s lost dependency %s=%s", u->id, unit_dependency_to_string(d), other->id);
5316 : } else
5317 : /* Mask was reduced, let's update the entry */
5318 2 : assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5319 8 : }
5320 :
5321 68 : void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5322 : UnitDependency d;
5323 :
5324 68 : assert(u);
5325 :
5326 : /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5327 :
5328 68 : if (mask == 0)
5329 0 : return;
5330 :
5331 1564 : for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5332 : bool done;
5333 :
5334 : do {
5335 : UnitDependencyInfo di;
5336 : Unit *other;
5337 : Iterator i;
5338 :
5339 1500 : done = true;
5340 :
5341 1559 : HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5342 : UnitDependency q;
5343 :
5344 63 : if ((di.origin_mask & ~mask) == di.origin_mask)
5345 59 : continue;
5346 4 : di.origin_mask &= ~mask;
5347 4 : unit_update_dependency_mask(u, d, other, di);
5348 :
5349 : /* We updated the dependency from our unit to the other unit now. But most dependencies
5350 : * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5351 : * all dependency types on the other unit and delete all those which point to us and
5352 : * have the right mask set. */
5353 :
5354 92 : for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5355 : UnitDependencyInfo dj;
5356 :
5357 88 : dj.data = hashmap_get(other->dependencies[q], u);
5358 88 : if ((dj.destination_mask & ~mask) == dj.destination_mask)
5359 84 : continue;
5360 4 : dj.destination_mask &= ~mask;
5361 :
5362 4 : unit_update_dependency_mask(other, q, u, dj);
5363 : }
5364 :
5365 4 : unit_add_to_gc_queue(other);
5366 :
5367 4 : done = false;
5368 4 : break;
5369 : }
5370 :
5371 1500 : } while (!done);
5372 : }
5373 : }
5374 :
5375 0 : static int unit_export_invocation_id(Unit *u) {
5376 : const char *p;
5377 : int r;
5378 :
5379 0 : assert(u);
5380 :
5381 0 : if (u->exported_invocation_id)
5382 0 : return 0;
5383 :
5384 0 : if (sd_id128_is_null(u->invocation_id))
5385 0 : return 0;
5386 :
5387 0 : p = strjoina("/run/systemd/units/invocation:", u->id);
5388 0 : r = symlink_atomic(u->invocation_id_string, p);
5389 0 : if (r < 0)
5390 0 : return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5391 :
5392 0 : u->exported_invocation_id = true;
5393 0 : return 0;
5394 : }
5395 :
5396 0 : static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5397 : const char *p;
5398 : char buf[2];
5399 : int r;
5400 :
5401 0 : assert(u);
5402 0 : assert(c);
5403 :
5404 0 : if (u->exported_log_level_max)
5405 0 : return 0;
5406 :
5407 0 : if (c->log_level_max < 0)
5408 0 : return 0;
5409 :
5410 0 : assert(c->log_level_max <= 7);
5411 :
5412 0 : buf[0] = '0' + c->log_level_max;
5413 0 : buf[1] = 0;
5414 :
5415 0 : p = strjoina("/run/systemd/units/log-level-max:", u->id);
5416 0 : r = symlink_atomic(buf, p);
5417 0 : if (r < 0)
5418 0 : return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5419 :
5420 0 : u->exported_log_level_max = true;
5421 0 : return 0;
5422 : }
5423 :
5424 0 : static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5425 0 : _cleanup_close_ int fd = -1;
5426 : struct iovec *iovec;
5427 : const char *p;
5428 : char *pattern;
5429 : le64_t *sizes;
5430 : ssize_t n;
5431 : size_t i;
5432 : int r;
5433 :
5434 0 : if (u->exported_log_extra_fields)
5435 0 : return 0;
5436 :
5437 0 : if (c->n_log_extra_fields <= 0)
5438 0 : return 0;
5439 :
5440 0 : sizes = newa(le64_t, c->n_log_extra_fields);
5441 0 : iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5442 :
5443 0 : for (i = 0; i < c->n_log_extra_fields; i++) {
5444 0 : sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5445 :
5446 0 : iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5447 0 : iovec[i*2+1] = c->log_extra_fields[i];
5448 : }
5449 :
5450 0 : p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5451 0 : pattern = strjoina(p, ".XXXXXX");
5452 :
5453 0 : fd = mkostemp_safe(pattern);
5454 0 : if (fd < 0)
5455 0 : return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5456 :
5457 0 : n = writev(fd, iovec, c->n_log_extra_fields*2);
5458 0 : if (n < 0) {
5459 0 : r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5460 0 : goto fail;
5461 : }
5462 :
5463 0 : (void) fchmod(fd, 0644);
5464 :
5465 0 : if (rename(pattern, p) < 0) {
5466 0 : r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5467 0 : goto fail;
5468 : }
5469 :
5470 0 : u->exported_log_extra_fields = true;
5471 0 : return 0;
5472 :
5473 0 : fail:
5474 0 : (void) unlink(pattern);
5475 0 : return r;
5476 : }
5477 :
5478 0 : static int unit_export_log_rate_limit_interval(Unit *u, const ExecContext *c) {
5479 0 : _cleanup_free_ char *buf = NULL;
5480 : const char *p;
5481 : int r;
5482 :
5483 0 : assert(u);
5484 0 : assert(c);
5485 :
5486 0 : if (u->exported_log_rate_limit_interval)
5487 0 : return 0;
5488 :
5489 0 : if (c->log_rate_limit_interval_usec == 0)
5490 0 : return 0;
5491 :
5492 0 : p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5493 :
5494 0 : if (asprintf(&buf, "%" PRIu64, c->log_rate_limit_interval_usec) < 0)
5495 0 : return log_oom();
5496 :
5497 0 : r = symlink_atomic(buf, p);
5498 0 : if (r < 0)
5499 0 : return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5500 :
5501 0 : u->exported_log_rate_limit_interval = true;
5502 0 : return 0;
5503 : }
5504 :
5505 0 : static int unit_export_log_rate_limit_burst(Unit *u, const ExecContext *c) {
5506 0 : _cleanup_free_ char *buf = NULL;
5507 : const char *p;
5508 : int r;
5509 :
5510 0 : assert(u);
5511 0 : assert(c);
5512 :
5513 0 : if (u->exported_log_rate_limit_burst)
5514 0 : return 0;
5515 :
5516 0 : if (c->log_rate_limit_burst == 0)
5517 0 : return 0;
5518 :
5519 0 : p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5520 :
5521 0 : if (asprintf(&buf, "%u", c->log_rate_limit_burst) < 0)
5522 0 : return log_oom();
5523 :
5524 0 : r = symlink_atomic(buf, p);
5525 0 : if (r < 0)
5526 0 : return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5527 :
5528 0 : u->exported_log_rate_limit_burst = true;
5529 0 : return 0;
5530 : }
5531 :
5532 6 : void unit_export_state_files(Unit *u) {
5533 : const ExecContext *c;
5534 :
5535 6 : assert(u);
5536 :
5537 6 : if (!u->id)
5538 0 : return;
5539 :
5540 6 : if (!MANAGER_IS_SYSTEM(u->manager))
5541 6 : return;
5542 :
5543 0 : if (MANAGER_IS_TEST_RUN(u->manager))
5544 0 : return;
5545 :
5546 : /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5547 : * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5548 : * the IPC system itself and PID 1 also log to the journal.
5549 : *
5550 : * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5551 : * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5552 : * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5553 : * namespace at least.
5554 : *
5555 : * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5556 : * better for storing small bits of data, in particular as we can write them with two system calls, and read
5557 : * them with one. */
5558 :
5559 0 : (void) unit_export_invocation_id(u);
5560 :
5561 0 : c = unit_get_exec_context(u);
5562 0 : if (c) {
5563 0 : (void) unit_export_log_level_max(u, c);
5564 0 : (void) unit_export_log_extra_fields(u, c);
5565 0 : (void) unit_export_log_rate_limit_interval(u, c);
5566 0 : (void) unit_export_log_rate_limit_burst(u, c);
5567 : }
5568 : }
5569 :
5570 2167 : void unit_unlink_state_files(Unit *u) {
5571 : const char *p;
5572 :
5573 2167 : assert(u);
5574 :
5575 2167 : if (!u->id)
5576 2 : return;
5577 :
5578 2165 : if (!MANAGER_IS_SYSTEM(u->manager))
5579 2165 : return;
5580 :
5581 : /* Undoes the effect of unit_export_state() */
5582 :
5583 0 : if (u->exported_invocation_id) {
5584 0 : p = strjoina("/run/systemd/units/invocation:", u->id);
5585 0 : (void) unlink(p);
5586 :
5587 0 : u->exported_invocation_id = false;
5588 : }
5589 :
5590 0 : if (u->exported_log_level_max) {
5591 0 : p = strjoina("/run/systemd/units/log-level-max:", u->id);
5592 0 : (void) unlink(p);
5593 :
5594 0 : u->exported_log_level_max = false;
5595 : }
5596 :
5597 0 : if (u->exported_log_extra_fields) {
5598 0 : p = strjoina("/run/systemd/units/extra-fields:", u->id);
5599 0 : (void) unlink(p);
5600 :
5601 0 : u->exported_log_extra_fields = false;
5602 : }
5603 :
5604 0 : if (u->exported_log_rate_limit_interval) {
5605 0 : p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5606 0 : (void) unlink(p);
5607 :
5608 0 : u->exported_log_rate_limit_interval = false;
5609 : }
5610 :
5611 0 : if (u->exported_log_rate_limit_burst) {
5612 0 : p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5613 0 : (void) unlink(p);
5614 :
5615 0 : u->exported_log_rate_limit_burst = false;
5616 : }
5617 : }
5618 :
5619 6 : int unit_prepare_exec(Unit *u) {
5620 : int r;
5621 :
5622 6 : assert(u);
5623 :
5624 : /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5625 : * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5626 6 : r = bpf_firewall_load_custom(u);
5627 6 : if (r < 0)
5628 0 : return r;
5629 :
5630 : /* Prepares everything so that we can fork of a process for this unit */
5631 :
5632 6 : (void) unit_realize_cgroup(u);
5633 :
5634 6 : if (u->reset_accounting) {
5635 6 : (void) unit_reset_accounting(u);
5636 6 : u->reset_accounting = false;
5637 : }
5638 :
5639 6 : unit_export_state_files(u);
5640 :
5641 6 : r = unit_setup_exec_runtime(u);
5642 6 : if (r < 0)
5643 0 : return r;
5644 :
5645 6 : r = unit_setup_dynamic_creds(u);
5646 6 : if (r < 0)
5647 0 : return r;
5648 :
5649 6 : return 0;
5650 : }
5651 :
5652 0 : static int log_leftover(pid_t pid, int sig, void *userdata) {
5653 0 : _cleanup_free_ char *comm = NULL;
5654 :
5655 0 : (void) get_process_comm(pid, &comm);
5656 :
5657 0 : if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5658 0 : return 0;
5659 :
5660 0 : log_unit_warning(userdata,
5661 : "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5662 : "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5663 : pid, strna(comm));
5664 :
5665 0 : return 1;
5666 : }
5667 :
5668 6 : int unit_warn_leftover_processes(Unit *u) {
5669 6 : assert(u);
5670 :
5671 6 : (void) unit_pick_cgroup_path(u);
5672 :
5673 6 : if (!u->cgroup_path)
5674 0 : return 0;
5675 :
5676 6 : return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5677 : }
5678 :
5679 1749 : bool unit_needs_console(Unit *u) {
5680 : ExecContext *ec;
5681 : UnitActiveState state;
5682 :
5683 1749 : assert(u);
5684 :
5685 1749 : state = unit_active_state(u);
5686 :
5687 1749 : if (UNIT_IS_INACTIVE_OR_FAILED(state))
5688 7 : return false;
5689 :
5690 1742 : if (UNIT_VTABLE(u)->needs_console)
5691 6 : return UNIT_VTABLE(u)->needs_console(u);
5692 :
5693 : /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5694 1736 : ec = unit_get_exec_context(u);
5695 1736 : if (!ec)
5696 1505 : return false;
5697 :
5698 231 : return exec_context_may_touch_console(ec);
5699 : }
5700 :
5701 0 : const char *unit_label_path(Unit *u) {
5702 : const char *p;
5703 :
5704 : /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5705 : * when validating access checks. */
5706 :
5707 0 : p = u->source_path ?: u->fragment_path;
5708 0 : if (!p)
5709 0 : return NULL;
5710 :
5711 : /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5712 0 : if (path_equal(p, "/dev/null"))
5713 0 : return NULL;
5714 :
5715 0 : return p;
5716 : }
5717 :
5718 0 : int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5719 : int r;
5720 :
5721 0 : assert(u);
5722 :
5723 : /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5724 : * and not a kernel thread either */
5725 :
5726 : /* First, a simple range check */
5727 0 : if (!pid_is_valid(pid))
5728 0 : return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5729 :
5730 : /* Some extra safety check */
5731 0 : if (pid == 1 || pid == getpid_cached())
5732 0 : return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5733 :
5734 : /* Don't even begin to bother with kernel threads */
5735 0 : r = is_kernel_thread(pid);
5736 0 : if (r == -ESRCH)
5737 0 : return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5738 0 : if (r < 0)
5739 0 : return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5740 0 : if (r > 0)
5741 0 : return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5742 :
5743 0 : return 0;
5744 : }
5745 :
5746 7 : void unit_log_success(Unit *u) {
5747 7 : assert(u);
5748 :
5749 7 : log_struct(LOG_INFO,
5750 : "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5751 : LOG_UNIT_ID(u),
5752 : LOG_UNIT_INVOCATION_ID(u),
5753 : LOG_UNIT_MESSAGE(u, "Succeeded."));
5754 7 : }
5755 :
5756 0 : void unit_log_failure(Unit *u, const char *result) {
5757 0 : assert(u);
5758 0 : assert(result);
5759 :
5760 0 : log_struct(LOG_WARNING,
5761 : "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5762 : LOG_UNIT_ID(u),
5763 : LOG_UNIT_INVOCATION_ID(u),
5764 : LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5765 : "UNIT_RESULT=%s", result);
5766 0 : }
5767 :
5768 0 : void unit_log_skip(Unit *u, const char *result) {
5769 0 : assert(u);
5770 0 : assert(result);
5771 :
5772 0 : log_struct(LOG_INFO,
5773 : "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5774 : LOG_UNIT_ID(u),
5775 : LOG_UNIT_INVOCATION_ID(u),
5776 : LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5777 : "UNIT_RESULT=%s", result);
5778 0 : }
5779 :
5780 0 : void unit_log_process_exit(
5781 : Unit *u,
5782 : const char *kind,
5783 : const char *command,
5784 : bool success,
5785 : int code,
5786 : int status) {
5787 :
5788 : int level;
5789 :
5790 0 : assert(u);
5791 0 : assert(kind);
5792 :
5793 : /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5794 : * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5795 : * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5796 : * WARNING. */
5797 0 : if (success)
5798 0 : level = LOG_DEBUG;
5799 0 : else if (code == CLD_EXITED)
5800 0 : level = LOG_NOTICE;
5801 : else
5802 0 : level = LOG_WARNING;
5803 :
5804 0 : log_struct(level,
5805 : "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5806 : LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5807 : kind,
5808 : sigchld_code_to_string(code), status,
5809 : strna(code == CLD_EXITED
5810 : ? exit_status_to_string(status, EXIT_STATUS_FULL)
5811 : : signal_to_string(status))),
5812 : "EXIT_CODE=%s", sigchld_code_to_string(code),
5813 : "EXIT_STATUS=%i", status,
5814 : "COMMAND=%s", strna(command),
5815 : LOG_UNIT_ID(u),
5816 : LOG_UNIT_INVOCATION_ID(u));
5817 0 : }
5818 :
5819 7 : int unit_exit_status(Unit *u) {
5820 7 : assert(u);
5821 :
5822 : /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5823 : * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5824 : * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5825 : * service process has exited abnormally (signal/coredump). */
5826 :
5827 7 : if (!UNIT_VTABLE(u)->exit_status)
5828 7 : return -EOPNOTSUPP;
5829 :
5830 0 : return UNIT_VTABLE(u)->exit_status(u);
5831 : }
5832 :
5833 0 : int unit_failure_action_exit_status(Unit *u) {
5834 : int r;
5835 :
5836 0 : assert(u);
5837 :
5838 : /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5839 :
5840 0 : if (u->failure_action_exit_status >= 0)
5841 0 : return u->failure_action_exit_status;
5842 :
5843 0 : r = unit_exit_status(u);
5844 0 : if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5845 0 : return 255;
5846 :
5847 0 : return r;
5848 : }
5849 :
5850 7 : int unit_success_action_exit_status(Unit *u) {
5851 : int r;
5852 :
5853 7 : assert(u);
5854 :
5855 : /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5856 :
5857 7 : if (u->success_action_exit_status >= 0)
5858 0 : return u->success_action_exit_status;
5859 :
5860 7 : r = unit_exit_status(u);
5861 7 : if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5862 0 : return 255;
5863 :
5864 7 : return r;
5865 : }
5866 :
5867 7 : int unit_test_trigger_loaded(Unit *u) {
5868 : Unit *trigger;
5869 :
5870 : /* Tests whether the unit to trigger is loaded */
5871 :
5872 7 : trigger = UNIT_TRIGGER(u);
5873 7 : if (!trigger)
5874 0 : return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5875 : "Refusing to start, no unit to trigger.");
5876 7 : if (trigger->load_state != UNIT_LOADED)
5877 0 : return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5878 : "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5879 :
5880 7 : return 0;
5881 : }
5882 :
5883 0 : int unit_clean(Unit *u, ExecCleanMask mask) {
5884 : UnitActiveState state;
5885 :
5886 0 : assert(u);
5887 :
5888 : /* Special return values:
5889 : *
5890 : * -EOPNOTSUPP → cleaning not supported for this unit type
5891 : * -EUNATCH → cleaning not defined for this resource type
5892 : * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5893 : * a job queued or similar
5894 : */
5895 :
5896 0 : if (!UNIT_VTABLE(u)->clean)
5897 0 : return -EOPNOTSUPP;
5898 :
5899 0 : if (mask == 0)
5900 0 : return -EUNATCH;
5901 :
5902 0 : if (u->load_state != UNIT_LOADED)
5903 0 : return -EBUSY;
5904 :
5905 0 : if (u->job)
5906 0 : return -EBUSY;
5907 :
5908 0 : state = unit_active_state(u);
5909 0 : if (!IN_SET(state, UNIT_INACTIVE))
5910 0 : return -EBUSY;
5911 :
5912 0 : return UNIT_VTABLE(u)->clean(u, mask);
5913 : }
5914 :
5915 0 : int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5916 0 : assert(u);
5917 :
5918 0 : if (!UNIT_VTABLE(u)->clean ||
5919 0 : u->load_state != UNIT_LOADED) {
5920 0 : *ret = 0;
5921 0 : return 0;
5922 : }
5923 :
5924 : /* When the clean() method is set, can_clean() really should be set too */
5925 0 : assert(UNIT_VTABLE(u)->can_clean);
5926 :
5927 0 : return UNIT_VTABLE(u)->can_clean(u, ret);
5928 : }
5929 :
5930 : static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
5931 : [COLLECT_INACTIVE] = "inactive",
5932 : [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
5933 : };
5934 :
5935 1198 : DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);
|