/* SPDX-License-Identifier: LGPL-2.1+ */

#if HAVE_SELINUX
#include <selinux/selinux.h>
#endif

#include "alloc-util.h"
#include "audit-util.h"
#include "cgroup-util.h"
#include "env-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "io-util.h"
#include "journal-util.h"
#include "journald-context.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "string-util.h"
#include "syslog-util.h"
#include "unaligned.h"
#include "user-util.h"

/* This implements a metadata cache for clients, which are identified by their PID. Requesting metadata through
 * /proc is expensive, hence let's cache the data if we can. Note that this means the metadata might be
 * out-of-date when we store it, but it might already be anyway, as we request the data asynchronously from
 * /proc at a different time than when the log entry was originally created. We hence just increase the "window
 * of inaccuracy" a bit.
 *
 * The cache is indexed by the PID. Entries may be "pinned" in the cache, in which case they are not removed
 * until they are unpinned. Unpinned entries are kept around until cache pressure is seen. Cache entries older
 * than 5s are never used (a sad attempt to deal with the UNIX weakness of PID reuse), cache entries older than
 * 1s are refreshed in an incremental way (meaning: data is reread from /proc, but any old data we can't refresh
 * is not flushed out). Data newer than 1s is used immediately without refresh.
 *
 * Log stream clients (i.e. all clients using the AF_UNIX/SOCK_STREAM stdout/stderr transport) will pin a cache
 * entry as long as their socket is connected. Note that cache entries are shared between different transports.
 * That means a cache entry pinned for the stream connection logic may be reused for the syslog or native
 * protocols.
 *
 * Caching metadata like this has two major benefits:
 *
 * 1. Reading metadata is expensive, and we can thus substantially speed up log processing under flood.
 *
 * 2. Because metadata caching is shared between stream and datagram transports and stream connections pin a
 *    cache entry, there's a good chance we can properly map a substantial set of datagram log messages to their
 *    originating service, as all services (unless explicitly configured otherwise) will have their
 *    stdout/stderr connected to a stream connection. This should improve cases where a service process logs
 *    immediately before exiting and we previously had trouble associating the log message with the service.
 *
 * NB: With or without the metadata cache, the implicitly added entry metadata in the journal (with the
 * exception of UID/PID/GID and SELinux label) must be understood as possibly slightly out of sync (i.e.
 * sometimes slightly older and sometimes slightly newer than what was current at the log event). */

/* We refresh every 1s */
#define REFRESH_USEC (1*USEC_PER_SEC)

/* Data older than 5s we flush out */
#define MAX_USEC (5*USEC_PER_SEC)

/* Keep at most 16K entries in the cache. (Note though that this limit may be breached if enough stream clients
 * pin entries, as pinned entries are never flushed out. That's safe however, as the number of stream clients
 * itself is limited.) */
#define CACHE_MAX_FALLBACK 128U
#define CACHE_MAX_MAX (16*1024U)
#define CACHE_MAX_MIN 64U

static size_t cache_max(void) {
        static size_t cached = (size_t) -1;

        if (cached == (size_t) -1) {
                uint64_t mem_total;
                int r;

                r = procfs_memory_get(&mem_total, NULL);
                if (r < 0) {
                        log_warning_errno(r, "Cannot query /proc/meminfo for MemTotal: %m");
                        cached = CACHE_MAX_FALLBACK;
                } else
                        /* Cache entries are usually a few kB, but the process cmdline is controlled by the
                         * user and can be up to _SC_ARG_MAX, usually 2MB. Let's say that approximately up to
                         * 1/8th of memory may be used by the cache.
                         *
                         * In the common case, this formula gives 64 cache entries for each GB of RAM. */
                        cached = CLAMP(mem_total / 8 / sc_arg_max(), CACHE_MAX_MIN, CACHE_MAX_MAX);
        }

        return cached;
}

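/* Comparator for the LRU priority queue: older timestamps sort first, so that prioq_pop() returns the least
 * recently refreshed unpinned entry; ties are broken by PID. */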
static int client_context_compare(const void *a, const void *b) {
        const ClientContext *x = a, *y = b;
        int r;

        r = CMP(x->timestamp, y->timestamp);
        if (r != 0)
                return r;

        return CMP(x->pid, y->pid);
}

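/* Allocates a new cache entry for the specified PID, initializes all fields to their "unset" values and
 * registers it in the PID hashmap. The entry is not added to the LRU queue here; that's up to the caller. */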
static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
        ClientContext *c;
        int r;

        assert(s);
        assert(pid_is_valid(pid));
        assert(ret);

        r = hashmap_ensure_allocated(&s->client_contexts, NULL);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
        if (r < 0)
                return r;

        c = new0(ClientContext, 1);
        if (!c)
                return -ENOMEM;

        c->pid = pid;

        c->uid = UID_INVALID;
        c->gid = GID_INVALID;
        c->auditid = AUDIT_SESSION_INVALID;
        c->loginuid = UID_INVALID;
        c->owner_uid = UID_INVALID;
        c->lru_index = PRIOQ_IDX_NULL;
        c->timestamp = USEC_INFINITY;
        c->extra_fields_mtime = NSEC_INFINITY;
        c->log_level_max = -1;
        c->log_rate_limit_interval = s->rate_limit_interval;
        c->log_rate_limit_burst = s->rate_limit_burst;

        r = hashmap_put(s->client_contexts, PID_TO_PTR(pid), c);
        if (r < 0) {
                free(c);
                return r;
        }

        *ret = c;
        return 0;
}

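/* Resets all cached fields of an entry to their initial, unset values, freeing any allocated memory, but keeps
 * the entry itself around. */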
static void client_context_reset(Server *s, ClientContext *c) {
        assert(s);
        assert(c);

        c->timestamp = USEC_INFINITY;

        c->uid = UID_INVALID;
        c->gid = GID_INVALID;

        c->comm = mfree(c->comm);
        c->exe = mfree(c->exe);
        c->cmdline = mfree(c->cmdline);
        c->capeff = mfree(c->capeff);

        c->auditid = AUDIT_SESSION_INVALID;
        c->loginuid = UID_INVALID;

        c->cgroup = mfree(c->cgroup);
        c->session = mfree(c->session);
        c->owner_uid = UID_INVALID;
        c->unit = mfree(c->unit);
        c->user_unit = mfree(c->user_unit);
        c->slice = mfree(c->slice);
        c->user_slice = mfree(c->user_slice);

        c->invocation_id = SD_ID128_NULL;

        c->label = mfree(c->label);
        c->label_size = 0;

        c->extra_fields_iovec = mfree(c->extra_fields_iovec);
        c->extra_fields_n_iovec = 0;
        c->extra_fields_data = mfree(c->extra_fields_data);
        c->extra_fields_mtime = NSEC_INFINITY;

        c->log_level_max = -1;

        c->log_rate_limit_interval = s->rate_limit_interval;
        c->log_rate_limit_burst = s->rate_limit_burst;
}

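/* Removes the entry from the hashmap and (if it is queued there) from the LRU priority queue, then frees it. */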
static ClientContext* client_context_free(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);

        if (c->in_lru)
                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);

        client_context_reset(s, c);

        return mfree(c);
}

static void client_context_read_uid_gid(ClientContext *c, const struct ucred *ucred) {
        assert(c);
        assert(pid_is_valid(c->pid));

        /* The ucred data passed in is always the most current and accurate, if we have any. Use it. */
        if (ucred && uid_is_valid(ucred->uid))
                c->uid = ucred->uid;
        else
                (void) get_process_uid(c->pid, &c->uid);

        if (ucred && gid_is_valid(ucred->gid))
                c->gid = ucred->gid;
        else
                (void) get_process_gid(c->pid, &c->gid);
}

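/* Reads the process' comm, exe, cmdline and effective capability set from /proc. If reading an individual
 * field fails, the previously cached value is left in place. */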
static void client_context_read_basic(ClientContext *c) {
        char *t;

        assert(c);
        assert(pid_is_valid(c->pid));

        if (get_process_comm(c->pid, &t) >= 0)
                free_and_replace(c->comm, t);

        if (get_process_exe(c->pid, &t) >= 0)
                free_and_replace(c->exe, t);

        if (get_process_cmdline(c->pid, SIZE_MAX, 0, &t) >= 0)
                free_and_replace(c->cmdline, t);

        if (get_process_capeff(c->pid, &t) >= 0)
                free_and_replace(c->capeff, t);
}

static int client_context_read_label(
                ClientContext *c,
                const char *label, size_t label_size) {

        assert(c);
        assert(pid_is_valid(c->pid));
        assert(label_size == 0 || label);

        if (label_size > 0) {
                char *l;

                /* If we got an SELinux label passed in it counts. */

                l = newdup_suffix0(char, label, label_size);
                if (!l)
                        return -ENOMEM;

                free_and_replace(c->label, l);
                c->label_size = label_size;
        }
#if HAVE_SELINUX
        else {
                char *con;

                /* If we got no SELinux label passed in, let's try to acquire one */

                if (getpidcon(c->pid, &con) >= 0) {
                        free_and_replace(c->label, con);
                        c->label_size = strlen(c->label);
                }
        }
#endif

        return 0;
}

static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(c);

        /* Try to acquire the current cgroup path */
        r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
        if (r < 0 || empty_or_root(t)) {
                /* We use the unit ID passed in as a fallback if we have nothing cached yet and
                 * cg_pid_get_path_shifted() failed or the process is running in the root cgroup. Zombie
                 * processes are automatically migrated to the root cgroup on cgroup v1, and we want to be
                 * able to map log messages from them too. */
                if (unit_id && !c->unit) {
                        c->unit = strdup(unit_id);
                        if (c->unit)
                                return 0;
                }

                return r;
        }

        /* Let's shortcut this if the cgroup path didn't change */
        if (streq_ptr(c->cgroup, t))
                return 0;

        free_and_replace(c->cgroup, t);

        (void) cg_path_get_session(c->cgroup, &t);
        free_and_replace(c->session, t);

        if (cg_path_get_owner_uid(c->cgroup, &c->owner_uid) < 0)
                c->owner_uid = UID_INVALID;

        (void) cg_path_get_unit(c->cgroup, &t);
        free_and_replace(c->unit, t);

        (void) cg_path_get_user_unit(c->cgroup, &t);
        free_and_replace(c->user_unit, t);

        (void) cg_path_get_slice(c->cgroup, &t);
        free_and_replace(c->slice, t);

        (void) cg_path_get_user_slice(c->cgroup, &t);
        free_and_replace(c->user_slice, t);

        return 0;
}

static int client_context_read_invocation_id(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(s);
        assert(c);

        /* Read the invocation ID of the unit the client belongs to. PID 1 stores it in a per-unit symlink
         * in /run/systemd/units/. */

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/invocation:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return sd_id128_from_string(value, &c->invocation_id);
}

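/* Reads the maximum log level for the unit, which PID 1 publishes as the target of the symlink
 * /run/systemd/units/log-level-max:<unit>. */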
static int client_context_read_log_level_max(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *value = NULL;
        const char *p;
        int r, ll;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-level-max:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        ll = log_level_from_string(value);
        if (ll < 0)
                return -EINVAL;

        c->log_level_max = ll;
        return 0;
}

static int client_context_read_extra_fields(
                Server *s,
                ClientContext *c) {

        size_t size = 0, n_iovec = 0, n_allocated = 0, left;
        _cleanup_free_ struct iovec *iovec = NULL;
        _cleanup_free_ void *data = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        struct stat st;
        const char *p;
        uint8_t *q;
        int r;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-extra-fields:", c->unit);

        if (c->extra_fields_mtime != NSEC_INFINITY) {
                if (stat(p, &st) < 0) {
                        if (errno == ENOENT)
                                return 0;

                        return -errno;
                }

                if (timespec_load_nsec(&st.st_mtim) == c->extra_fields_mtime)
                        return 0;
        }

        f = fopen(p, "re");
        if (!f) {
                if (errno == ENOENT)
                        return 0;

                return -errno;
        }

        if (fstat(fileno(f), &st) < 0) /* The file might have been replaced since the stat() above, let's get
                                        * a new one, that matches the stuff we are reading */
                return -errno;

        r = read_full_stream(f, (char**) &data, &size);
        if (r < 0)
                return r;

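        /* The serialized format is a sequence of records, each consisting of a little-endian 64-bit length
         * followed by that many bytes of "FIELD=VALUE" data. */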
        q = data, left = size;
        while (left > 0) {
                uint8_t *field, *eq;
                uint64_t v, n;

                if (left < sizeof(uint64_t))
                        return -EBADMSG;

                v = unaligned_read_le64(q);
                if (v < 2)
                        return -EBADMSG;

                n = sizeof(uint64_t) + v;
                if (left < n)
                        return -EBADMSG;

                field = q + sizeof(uint64_t);

                eq = memchr(field, '=', v);
                if (!eq)
                        return -EBADMSG;

                if (!journal_field_valid((const char *) field, eq - field, false))
                        return -EBADMSG;

                if (!GREEDY_REALLOC(iovec, n_allocated, n_iovec+1))
                        return -ENOMEM;

                iovec[n_iovec++] = IOVEC_MAKE(field, v);

                left -= n, q += n;
        }

        free(c->extra_fields_iovec);
        free(c->extra_fields_data);

        c->extra_fields_iovec = TAKE_PTR(iovec);
        c->extra_fields_n_iovec = n_iovec;
        c->extra_fields_data = TAKE_PTR(data);
        c->extra_fields_mtime = timespec_load_nsec(&st.st_mtim);

        return 0;
}

static int client_context_read_log_rate_limit_interval(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou64(value, &c->log_rate_limit_interval);
}

static int client_context_read_log_rate_limit_burst(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou(value, &c->log_rate_limit_burst);
}

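/* Unconditionally rereads all metadata for this client: credentials, /proc data, audit session, cgroup
 * information and the per-unit settings published by PID 1, and updates the entry's timestamp. */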
static void client_context_really_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);
        assert(pid_is_valid(c->pid));

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        client_context_read_uid_gid(c, ucred);
        client_context_read_basic(c);
        (void) client_context_read_label(c, label, label_size);

        (void) audit_session_from_pid(c->pid, &c->auditid);
        (void) audit_loginuid_from_pid(c->pid, &c->loginuid);

        (void) client_context_read_cgroup(s, c, unit_id);
        (void) client_context_read_invocation_id(s, c);
        (void) client_context_read_log_level_max(s, c);
        (void) client_context_read_extra_fields(s, c);
        (void) client_context_read_log_rate_limit_interval(c);
        (void) client_context_read_log_rate_limit_burst(c);

        c->timestamp = timestamp;

        if (c->in_lru) {
                assert(c->n_ref == 0);
                assert_se(prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index) >= 0);
        }
}

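/* Refreshes the cached metadata only if necessary: if no data is cached yet, if it is too old, or if it
 * contradicts the credentials passed in with the message. */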
void client_context_maybe_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        /* No cached data so far? Let's fill it up */
        if (c->timestamp == USEC_INFINITY)
                goto refresh;

        /* If the data isn't pinned and the cached data is older than the upper limit, we flush it out
         * entirely. This follows the logic that as long as an entry is pinned, PID reuse is unlikely. */
        if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
                client_context_reset(s, c);
                goto refresh;
        }

        /* If the data is older than the lower limit, we refresh, but keep the old data for everything we
         * can't update */
        if (c->timestamp + REFRESH_USEC < timestamp)
                goto refresh;

        /* If the data passed along doesn't match the cached data we also do a refresh */
        if (ucred && uid_is_valid(ucred->uid) && c->uid != ucred->uid)
                goto refresh;

        if (ucred && gid_is_valid(ucred->gid) && c->gid != ucred->gid)
                goto refresh;

        if (label_size > 0 && (label_size != c->label_size || memcmp(label, c->label, label_size) != 0))
                goto refresh;

        return;

refresh:
        client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
}

static void client_context_try_shrink_to(Server *s, size_t limit) {
        ClientContext *c;
        usec_t t;

        assert(s);

        /* Flush any cache entries for PIDs that have already moved on. Don't do this too often, since it's
         * a slow process. */
        t = now(CLOCK_MONOTONIC);
        if (s->last_cache_pid_flush + MAX_USEC < t) {
                unsigned n = prioq_size(s->client_contexts_lru), idx = 0;

                /* We do a number of iterations based on the initial size of the prioq. When we remove an
                 * item, a new item is moved into its place, and items to the right might be reshuffled. */
                for (unsigned i = 0; i < n; i++) {
                        c = prioq_peek_by_index(s->client_contexts_lru, idx);

                        assert(c->n_ref == 0);

                        if (!pid_is_unwaited(c->pid))
                                client_context_free(s, c);
                        else
                                idx++;
                }

                s->last_cache_pid_flush = t;
        }

        /* Bring the number of cache entries below the indicated limit, so that we can create a new entry
         * without breaching the limit. Note that we only flush out entries that aren't pinned here. This
         * means the number of cache entries may very well grow beyond the limit, if all entries stored
         * remain pinned. */

        while (hashmap_size(s->client_contexts) > limit) {
                c = prioq_pop(s->client_contexts_lru);
                if (!c)
                        break; /* All remaining entries are pinned, give up */

                assert(c->in_lru);
                assert(c->n_ref == 0);

                c->in_lru = false;

                client_context_free(s, c);
        }
}

void client_context_flush_all(Server *s) {
        assert(s);

        /* Flush out all remaining entries. This assumes all references are already dropped. */

        s->my_context = client_context_release(s, s->my_context);
        s->pid1_context = client_context_release(s, s->pid1_context);

        client_context_try_shrink_to(s, 0);

        assert(prioq_size(s->client_contexts_lru) == 0);
        assert(hashmap_size(s->client_contexts) == 0);

        s->client_contexts_lru = prioq_free(s->client_contexts_lru);
        s->client_contexts = hashmap_free(s->client_contexts);
}

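/* Looks up the cache entry for the specified PID, allocating and filling in a new one if necessary. If
 * add_ref is true the entry is pinned: it is taken off the LRU queue and its reference counter is bumped.
 * Otherwise it is kept on the LRU queue, subject to cache pressure. */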
static int client_context_get_internal(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                bool add_ref,
                ClientContext **ret) {

        ClientContext *c;
        int r;

        assert(s);
        assert(ret);

        if (!pid_is_valid(pid))
                return -EINVAL;

        c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
        if (c) {

                if (add_ref) {
                        if (c->in_lru) {
                                /* The entry wasn't pinned so far, let's remove it from the LRU list then */
                                assert(c->n_ref == 0);
                                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
                                c->in_lru = false;
                        }

                        c->n_ref++;
                }

                client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

                *ret = c;
                return 0;
        }

        client_context_try_shrink_to(s, cache_max()-1);

        r = client_context_new(s, pid, &c);
        if (r < 0)
                return r;

        if (add_ref)
                c->n_ref++;
        else {
                r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
                if (r < 0) {
                        client_context_free(s, c);
                        return r;
                }

                c->in_lru = true;
        }

        client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

        *ret = c;
        return 0;
}

int client_context_get(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
}

int client_context_acquire(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
}

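/* Drops one pin from the entry. Once the last reference is gone the entry is put back on the LRU queue, or
 * freed right away if that fails. */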
ClientContext *client_context_release(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert(c->n_ref > 0);
        assert(!c->in_lru);

        c->n_ref--;
        if (c->n_ref > 0)
                return NULL;

        /* The entry is not pinned anymore, let's add it to the LRU prioq if we can. If we can't, we'll drop
         * it right away. */

        if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
                client_context_free(s, c);
        else
                c->in_lru = true;

        return NULL;
}

void client_context_acquire_default(Server *s) {
        int r;

        assert(s);

        /* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful
         * to generate driver messages. */

        if (!s->my_context) {
                struct ucred ucred = {
                        .pid = getpid_cached(),
                        .uid = getuid(),
                        .gid = getgid(),
                };

                r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
                if (r < 0)
                        log_warning_errno(r, "Failed to acquire our own context, ignoring: %m");
        }

        if (!s->pid1_context) {
                r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
                if (r < 0)
                        log_warning_errno(r, "Failed to acquire PID1's context, ignoring: %m");
        }
}