LCOV - code coverage report
Current view: top level - libsystemd/sd-event - sd-event.c (source / functions)
Test:         main_coverage.info
Date:         2019-08-22 15:41:25

                     Hit     Total   Coverage
Lines:              1392      1885     73.8 %
Functions:            89       117     76.1 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: LGPL-2.1+ */
       2             : 
       3             : #include <sys/epoll.h>
       4             : #include <sys/timerfd.h>
       5             : #include <sys/wait.h>
       6             : 
       7             : #include "sd-daemon.h"
       8             : #include "sd-event.h"
       9             : #include "sd-id128.h"
      10             : 
      11             : #include "alloc-util.h"
      12             : #include "event-source.h"
      13             : #include "fd-util.h"
      14             : #include "fs-util.h"
      15             : #include "hashmap.h"
      16             : #include "list.h"
      17             : #include "macro.h"
      18             : #include "memory-util.h"
      19             : #include "missing.h"
      20             : #include "prioq.h"
      21             : #include "process-util.h"
      22             : #include "set.h"
      23             : #include "signal-util.h"
      24             : #include "string-table.h"
      25             : #include "string-util.h"
      26             : #include "strxcpyx.h"
      27             : #include "time-util.h"
      28             : 
      29             : #define DEFAULT_ACCURACY_USEC (250 * USEC_PER_MSEC)
      30             : 
      31             : static const char* const event_source_type_table[_SOURCE_EVENT_SOURCE_TYPE_MAX] = {
      32             :         [SOURCE_IO] = "io",
      33             :         [SOURCE_TIME_REALTIME] = "realtime",
      34             :         [SOURCE_TIME_BOOTTIME] = "boottime",
      35             :         [SOURCE_TIME_MONOTONIC] = "monotonic",
      36             :         [SOURCE_TIME_REALTIME_ALARM] = "realtime-alarm",
      37             :         [SOURCE_TIME_BOOTTIME_ALARM] = "boottime-alarm",
      38             :         [SOURCE_SIGNAL] = "signal",
      39             :         [SOURCE_CHILD] = "child",
      40             :         [SOURCE_DEFER] = "defer",
      41             :         [SOURCE_POST] = "post",
      42             :         [SOURCE_EXIT] = "exit",
      43             :         [SOURCE_WATCHDOG] = "watchdog",
      44             :         [SOURCE_INOTIFY] = "inotify",
      45             : };
      46             : 
      47           0 : DEFINE_PRIVATE_STRING_TABLE_LOOKUP_TO_STRING(event_source_type, int);
      48             : 
      49             : #define EVENT_SOURCE_IS_TIME(t) IN_SET((t), SOURCE_TIME_REALTIME, SOURCE_TIME_BOOTTIME, SOURCE_TIME_MONOTONIC, SOURCE_TIME_REALTIME_ALARM, SOURCE_TIME_BOOTTIME_ALARM)
      50             : 
      51             : struct sd_event {
      52             :         unsigned n_ref;
      53             : 
      54             :         int epoll_fd;
      55             :         int watchdog_fd;
      56             : 
      57             :         Prioq *pending;
      58             :         Prioq *prepare;
      59             : 
      60             :         /* timerfd_create() only supports these five clocks so far. We
      61             :          * can add support for more clocks when the kernel learns to
      62             :          * deal with them, too. */
      63             :         struct clock_data realtime;
      64             :         struct clock_data boottime;
      65             :         struct clock_data monotonic;
      66             :         struct clock_data realtime_alarm;
      67             :         struct clock_data boottime_alarm;
      68             : 
      69             :         usec_t perturb;
      70             : 
      71             :         sd_event_source **signal_sources; /* indexed by signal number */
      72             :         Hashmap *signal_data; /* indexed by priority */
      73             : 
      74             :         Hashmap *child_sources;
      75             :         unsigned n_enabled_child_sources;
      76             : 
      77             :         Set *post_sources;
      78             : 
      79             :         Prioq *exit;
      80             : 
      81             :         Hashmap *inotify_data; /* indexed by priority */
      82             : 
      83             :         /* A list of inode structures that still have an fd open, that we need to close before the next loop iteration */
      84             :         LIST_HEAD(struct inode_data, inode_data_to_close);
      85             : 
      86             :         /* A list of inotify objects that already have events buffered which aren't processed yet */
      87             :         LIST_HEAD(struct inotify_data, inotify_data_buffered);
      88             : 
      89             :         pid_t original_pid;
      90             : 
      91             :         uint64_t iteration;
      92             :         triple_timestamp timestamp;
      93             :         int state;
      94             : 
      95             :         bool exit_requested:1;
      96             :         bool need_process_child:1;
      97             :         bool watchdog:1;
      98             :         bool profile_delays:1;
      99             : 
     100             :         int exit_code;
     101             : 
     102             :         pid_t tid;
     103             :         sd_event **default_event_ptr;
     104             : 
     105             :         usec_t watchdog_last, watchdog_period;
     106             : 
     107             :         unsigned n_sources;
     108             : 
     109             :         LIST_HEAD(sd_event_source, sources);
     110             : 
     111             :         usec_t last_run, last_log;
     112             :         unsigned delays[sizeof(usec_t) * 8];
     113             : };
     114             : 
     115             : static thread_local sd_event *default_event = NULL;
     116             : 
     117             : static void source_disconnect(sd_event_source *s);
     118             : static void event_gc_inode_data(sd_event *e, struct inode_data *d);
     119             : 
     120      209739 : static sd_event *event_resolve(sd_event *e) {
     121      209739 :         return e == SD_EVENT_DEFAULT ? default_event : e;
     122             : }
     123             : 
     124       94827 : static int pending_prioq_compare(const void *a, const void *b) {
     125       94827 :         const sd_event_source *x = a, *y = b;
     126             :         int r;
     127             : 
     128       94827 :         assert(x->pending);
     129       94827 :         assert(y->pending);
     130             : 
     131             :         /* Enabled ones first */
     132       94827 :         if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
     133        9157 :                 return -1;
     134       85670 :         if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
     135        4133 :                 return 1;
     136             : 
     137             :         /* Lower priority values first */
     138       81537 :         r = CMP(x->priority, y->priority);
     139       81537 :         if (r != 0)
     140       55310 :                 return r;
     141             : 
     142             :         /* Older entries first */
     143       26227 :         return CMP(x->pending_iteration, y->pending_iteration);
     144             : }
     145             : 
     146     5284727 : static int prepare_prioq_compare(const void *a, const void *b) {
     147     5284727 :         const sd_event_source *x = a, *y = b;
     148             :         int r;
     149             : 
     150     5284727 :         assert(x->prepare);
     151     5284727 :         assert(y->prepare);
     152             : 
     153             :         /* Enabled ones first */
     154     5284727 :         if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
     155        1251 :                 return -1;
     156     5283476 :         if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
     157           1 :                 return 1;
     158             : 
     159             :         /* Move most recently prepared ones last, so that we can stop
     160             :          * preparing as soon as we hit one that has already been
     161             :          * prepared in the current iteration */
     162     5283475 :         r = CMP(x->prepare_iteration, y->prepare_iteration);
     163     5283475 :         if (r != 0)
     164     2490907 :                 return r;
     165             : 
     166             :         /* Lower priority values first */
     167     2792568 :         return CMP(x->priority, y->priority);
     168             : }
     169             : 
     170        6797 : static int earliest_time_prioq_compare(const void *a, const void *b) {
     171        6797 :         const sd_event_source *x = a, *y = b;
     172             : 
     173        6797 :         assert(EVENT_SOURCE_IS_TIME(x->type));
     174        6797 :         assert(x->type == y->type);
     175             : 
     176             :         /* Enabled ones first */
     177        6797 :         if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
     178        1721 :                 return -1;
     179        5076 :         if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
     180         787 :                 return 1;
     181             : 
     182             :         /* Move the pending ones to the end */
     183        4289 :         if (!x->pending && y->pending)
     184          81 :                 return -1;
     185        4208 :         if (x->pending && !y->pending)
     186           0 :                 return 1;
     187             : 
     188             :         /* Order by time */
     189        4208 :         return CMP(x->time.next, y->time.next);
     190             : }
     191             : 
     192        8505 : static usec_t time_event_source_latest(const sd_event_source *s) {
     193        8505 :         return usec_add(s->time.next, s->time.accuracy);
     194             : }
     195             : 
     196        6829 : static int latest_time_prioq_compare(const void *a, const void *b) {
     197        6829 :         const sd_event_source *x = a, *y = b;
     198             : 
     199        6829 :         assert(EVENT_SOURCE_IS_TIME(x->type));
     200        6829 :         assert(x->type == y->type);
     201             : 
     202             :         /* Enabled ones first */
     203        6829 :         if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
     204        1752 :                 return -1;
     205        5077 :         if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
     206         787 :                 return 1;
     207             : 
     208             :         /* Move the pending ones to the end */
     209        4290 :         if (!x->pending && y->pending)
     210          81 :                 return -1;
     211        4209 :         if (x->pending && !y->pending)
     212           0 :                 return 1;
     213             : 
     214             :         /* Order by time */
     215        4209 :         return CMP(time_event_source_latest(x), time_event_source_latest(y));
     216             : }
     217             : 
     218        8532 : static int exit_prioq_compare(const void *a, const void *b) {
     219        8532 :         const sd_event_source *x = a, *y = b;
     220             : 
     221        8532 :         assert(x->type == SOURCE_EXIT);
     222        8532 :         assert(y->type == SOURCE_EXIT);
     223             : 
     224             :         /* Enabled ones first */
     225        8532 :         if (x->enabled != SD_EVENT_OFF && y->enabled == SD_EVENT_OFF)
     226        1251 :                 return -1;
     227        7281 :         if (x->enabled == SD_EVENT_OFF && y->enabled != SD_EVENT_OFF)
     228           0 :                 return 1;
     229             : 
     230             :         /* Lower priority values first */
     231        7281 :         return CMP(x->priority, y->priority);
     232             : }
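
/* A minimal sketch of how the comparators above are consumed, assuming the
 * prioq.h helpers used throughout this file (prioq_ensure_allocated(),
 * prioq_put()) plus prioq_peek(); the helper name next_pending() is
 * illustrative, not covered code. The comparator defines the heap order, so
 * the entry that should be dispatched first ends up at the top. */
static sd_event_source *next_pending(sd_event *e) {
        /* e->pending is allocated with pending_prioq_compare, so enabled
         * sources sort before disabled ones, lower priority values come
         * first, and older pending iterations win ties. */
        return prioq_peek(e->pending);
}
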
     233             : 
     234         185 : static void free_clock_data(struct clock_data *d) {
     235         185 :         assert(d);
     236         185 :         assert(d->wakeup == WAKEUP_CLOCK_DATA);
     237             : 
     238         185 :         safe_close(d->fd);
     239         185 :         prioq_free(d->earliest);
     240         185 :         prioq_free(d->latest);
     241         185 : }
     242             : 
     243          37 : static sd_event *event_free(sd_event *e) {
     244             :         sd_event_source *s;
     245             : 
     246          37 :         assert(e);
     247             : 
     248          40 :         while ((s = e->sources)) {
     249           3 :                 assert(s->floating);
     250           3 :                 source_disconnect(s);
     251           3 :                 sd_event_source_unref(s);
     252             :         }
     253             : 
     254          37 :         assert(e->n_sources == 0);
     255             : 
     256          37 :         if (e->default_event_ptr)
     257          25 :                 *(e->default_event_ptr) = NULL;
     258             : 
     259          37 :         safe_close(e->epoll_fd);
     260          37 :         safe_close(e->watchdog_fd);
     261             : 
     262          37 :         free_clock_data(&e->realtime);
     263          37 :         free_clock_data(&e->boottime);
     264          37 :         free_clock_data(&e->monotonic);
     265          37 :         free_clock_data(&e->realtime_alarm);
     266          37 :         free_clock_data(&e->boottime_alarm);
     267             : 
     268          37 :         prioq_free(e->pending);
     269          37 :         prioq_free(e->prepare);
     270          37 :         prioq_free(e->exit);
     271             : 
     272          37 :         free(e->signal_sources);
     273          37 :         hashmap_free(e->signal_data);
     274             : 
     275          37 :         hashmap_free(e->inotify_data);
     276             : 
     277          37 :         hashmap_free(e->child_sources);
     278          37 :         set_free(e->post_sources);
     279             : 
     280          37 :         return mfree(e);
     281             : }
     282             : 
     283          38 : _public_ int sd_event_new(sd_event** ret) {
     284             :         sd_event *e;
     285             :         int r;
     286             : 
     287          38 :         assert_return(ret, -EINVAL);
     288             : 
     289          38 :         e = new(sd_event, 1);
     290          38 :         if (!e)
     291           0 :                 return -ENOMEM;
     292             : 
     293          38 :         *e = (sd_event) {
     294             :                 .n_ref = 1,
     295             :                 .epoll_fd = -1,
     296             :                 .watchdog_fd = -1,
     297             :                 .realtime.wakeup = WAKEUP_CLOCK_DATA,
     298             :                 .realtime.fd = -1,
     299             :                 .realtime.next = USEC_INFINITY,
     300             :                 .boottime.wakeup = WAKEUP_CLOCK_DATA,
     301             :                 .boottime.fd = -1,
     302             :                 .boottime.next = USEC_INFINITY,
     303             :                 .monotonic.wakeup = WAKEUP_CLOCK_DATA,
     304             :                 .monotonic.fd = -1,
     305             :                 .monotonic.next = USEC_INFINITY,
     306             :                 .realtime_alarm.wakeup = WAKEUP_CLOCK_DATA,
     307             :                 .realtime_alarm.fd = -1,
     308             :                 .realtime_alarm.next = USEC_INFINITY,
     309             :                 .boottime_alarm.wakeup = WAKEUP_CLOCK_DATA,
     310             :                 .boottime_alarm.fd = -1,
     311             :                 .boottime_alarm.next = USEC_INFINITY,
     312             :                 .perturb = USEC_INFINITY,
     313          38 :                 .original_pid = getpid_cached(),
     314             :         };
     315             : 
     316          38 :         r = prioq_ensure_allocated(&e->pending, pending_prioq_compare);
     317          38 :         if (r < 0)
     318           0 :                 goto fail;
     319             : 
     320          38 :         e->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
     321          38 :         if (e->epoll_fd < 0) {
     322           0 :                 r = -errno;
     323           0 :                 goto fail;
     324             :         }
     325             : 
     326          38 :         e->epoll_fd = fd_move_above_stdio(e->epoll_fd);
     327             : 
     328          38 :         if (secure_getenv("SD_EVENT_PROFILE_DELAYS")) {
     329           0 :                 log_debug("Event loop profiling enabled. Logarithmic histogram of event loop iterations in the range 2^0 ... 2^63 us will be logged every 5s.");
     330           0 :                 e->profile_delays = true;
     331             :         }
     332             : 
     333          38 :         *ret = e;
     334          38 :         return 0;
     335             : 
     336           0 : fail:
     337           0 :         event_free(e);
     338           0 :         return r;
     339             : }
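
/* A minimal usage sketch for sd_event_new() above, not taken from the covered
 * source; make_loop() is an illustrative caller and error handling is kept to
 * the bare minimum. */
#include <systemd/sd-event.h>

static int make_loop(sd_event **ret) {
        sd_event *e = NULL;
        int r;

        r = sd_event_new(&e);          /* reference count starts at 1 */
        if (r < 0)
                return r;

        *ret = e;                      /* drop with sd_event_unref() when done */
        return 0;
}
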
     340             : 
     341      109596 : DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event, sd_event, event_free);
     342             : 
     343        3737 : _public_ sd_event_source* sd_event_source_disable_unref(sd_event_source *s) {
     344        3737 :         if (s)
     345        1865 :                 (void) sd_event_source_set_enabled(s, SD_EVENT_OFF);
     346        3737 :         return sd_event_source_unref(s);
     347             : }
     348             : 
     349     1006852 : static bool event_pid_changed(sd_event *e) {
     350     1006852 :         assert(e);
     351             : 
     352             :         /* We don't support people creating an event loop and keeping
     353             :          * it around over a fork(). Let's complain. */
     354             : 
     355     1006852 :         return e->original_pid != getpid_cached();
     356             : }
     357             : 
     358        1108 : static void source_io_unregister(sd_event_source *s) {
     359             :         int r;
     360             : 
     361        1108 :         assert(s);
     362        1108 :         assert(s->type == SOURCE_IO);
     363             : 
     364        1108 :         if (event_pid_changed(s->event))
     365           0 :                 return;
     366             : 
     367        1108 :         if (!s->io.registered)
     368         501 :                 return;
     369             : 
     370         607 :         r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, s->io.fd, NULL);
     371         607 :         if (r < 0)
     372           0 :                 log_debug_errno(errno, "Failed to remove source %s (type %s) from epoll: %m",
     373             :                                 strna(s->description), event_source_type_to_string(s->type));
     374             : 
     375         607 :         s->io.registered = false;
     376             : }
     377             : 
     378        1840 : static int source_io_register(
     379             :                 sd_event_source *s,
     380             :                 int enabled,
     381             :                 uint32_t events) {
     382             : 
     383             :         struct epoll_event ev;
     384             :         int r;
     385             : 
     386        1840 :         assert(s);
     387        1840 :         assert(s->type == SOURCE_IO);
     388        1840 :         assert(enabled != SD_EVENT_OFF);
     389             : 
     390        1840 :         ev = (struct epoll_event) {
     391        1840 :                 .events = events | (enabled == SD_EVENT_ONESHOT ? EPOLLONESHOT : 0),
     392             :                 .data.ptr = s,
     393             :         };
     394             : 
     395        1840 :         if (s->io.registered)
     396        1232 :                 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_MOD, s->io.fd, &ev);
     397             :         else
     398         608 :                 r = epoll_ctl(s->event->epoll_fd, EPOLL_CTL_ADD, s->io.fd, &ev);
     399        1840 :         if (r < 0)
     400           0 :                 return -errno;
     401             : 
     402        1840 :         s->io.registered = true;
     403             : 
     404        1840 :         return 0;
     405             : }
     406             : 
     407          43 : static clockid_t event_source_type_to_clock(EventSourceType t) {
     408             : 
     409          43 :         switch (t) {
     410             : 
     411           0 :         case SOURCE_TIME_REALTIME:
     412           0 :                 return CLOCK_REALTIME;
     413             : 
     414          43 :         case SOURCE_TIME_BOOTTIME:
     415          43 :                 return CLOCK_BOOTTIME;
     416             : 
     417           0 :         case SOURCE_TIME_MONOTONIC:
     418           0 :                 return CLOCK_MONOTONIC;
     419             : 
     420           0 :         case SOURCE_TIME_REALTIME_ALARM:
     421           0 :                 return CLOCK_REALTIME_ALARM;
     422             : 
     423           0 :         case SOURCE_TIME_BOOTTIME_ALARM:
     424           0 :                 return CLOCK_BOOTTIME_ALARM;
     425             : 
     426           0 :         default:
     427           0 :                 return (clockid_t) -1;
     428             :         }
     429             : }
     430             : 
     431         514 : static EventSourceType clock_to_event_source_type(clockid_t clock) {
     432             : 
     433         514 :         switch (clock) {
     434             : 
     435           0 :         case CLOCK_REALTIME:
     436           0 :                 return SOURCE_TIME_REALTIME;
     437             : 
     438          19 :         case CLOCK_BOOTTIME:
     439          19 :                 return SOURCE_TIME_BOOTTIME;
     440             : 
     441         495 :         case CLOCK_MONOTONIC:
     442         495 :                 return SOURCE_TIME_MONOTONIC;
     443             : 
     444           0 :         case CLOCK_REALTIME_ALARM:
     445           0 :                 return SOURCE_TIME_REALTIME_ALARM;
     446             : 
     447           0 :         case CLOCK_BOOTTIME_ALARM:
     448           0 :                 return SOURCE_TIME_BOOTTIME_ALARM;
     449             : 
     450           0 :         default:
     451           0 :                 return _SOURCE_EVENT_SOURCE_TYPE_INVALID;
     452             :         }
     453             : }
     454             : 
     455        2922 : static struct clock_data* event_get_clock_data(sd_event *e, EventSourceType t) {
     456        2922 :         assert(e);
     457             : 
     458        2922 :         switch (t) {
     459             : 
     460           0 :         case SOURCE_TIME_REALTIME:
     461           0 :                 return &e->realtime;
     462             : 
     463         280 :         case SOURCE_TIME_BOOTTIME:
     464         280 :                 return &e->boottime;
     465             : 
     466        2642 :         case SOURCE_TIME_MONOTONIC:
     467        2642 :                 return &e->monotonic;
     468             : 
     469           0 :         case SOURCE_TIME_REALTIME_ALARM:
     470           0 :                 return &e->realtime_alarm;
     471             : 
     472           0 :         case SOURCE_TIME_BOOTTIME_ALARM:
     473           0 :                 return &e->boottime_alarm;
     474             : 
     475           0 :         default:
     476           0 :                 return NULL;
     477             :         }
     478             : }
     479             : 
     480           4 : static void event_free_signal_data(sd_event *e, struct signal_data *d) {
     481           4 :         assert(e);
     482             : 
     483           4 :         if (!d)
     484           0 :                 return;
     485             : 
     486           4 :         hashmap_remove(e->signal_data, &d->priority);
     487           4 :         safe_close(d->fd);
     488           4 :         free(d);
     489             : }
     490             : 
     491           8 : static int event_make_signal_data(
     492             :                 sd_event *e,
     493             :                 int sig,
     494             :                 struct signal_data **ret) {
     495             : 
     496             :         struct epoll_event ev;
     497             :         struct signal_data *d;
     498           8 :         bool added = false;
     499             :         sigset_t ss_copy;
     500             :         int64_t priority;
     501             :         int r;
     502             : 
     503           8 :         assert(e);
     504             : 
     505           8 :         if (event_pid_changed(e))
     506           0 :                 return -ECHILD;
     507             : 
     508           8 :         if (e->signal_sources && e->signal_sources[sig])
     509           7 :                 priority = e->signal_sources[sig]->priority;
     510             :         else
     511           1 :                 priority = SD_EVENT_PRIORITY_NORMAL;
     512             : 
     513           8 :         d = hashmap_get(e->signal_data, &priority);
     514           8 :         if (d) {
     515           4 :                 if (sigismember(&d->sigset, sig) > 0) {
     516           1 :                         if (ret)
     517           0 :                                 *ret = d;
     518           1 :                         return 0;
     519             :                 }
     520             :         } else {
     521           4 :                 r = hashmap_ensure_allocated(&e->signal_data, &uint64_hash_ops);
     522           4 :                 if (r < 0)
     523           0 :                         return r;
     524             : 
     525           4 :                 d = new(struct signal_data, 1);
     526           4 :                 if (!d)
     527           0 :                         return -ENOMEM;
     528             : 
     529           4 :                 *d = (struct signal_data) {
     530             :                         .wakeup = WAKEUP_SIGNAL_DATA,
     531             :                         .fd = -1,
     532             :                         .priority = priority,
     533             :                 };
     534             : 
     535           4 :                 r = hashmap_put(e->signal_data, &d->priority, d);
     536           4 :                 if (r < 0) {
     537           0 :                         free(d);
     538           0 :                         return r;
     539             :                 }
     540             : 
     541           4 :                 added = true;
     542             :         }
     543             : 
     544           7 :         ss_copy = d->sigset;
     545           7 :         assert_se(sigaddset(&ss_copy, sig) >= 0);
     546             : 
     547           7 :         r = signalfd(d->fd, &ss_copy, SFD_NONBLOCK|SFD_CLOEXEC);
     548           7 :         if (r < 0) {
     549           0 :                 r = -errno;
     550           0 :                 goto fail;
     551             :         }
     552             : 
     553           7 :         d->sigset = ss_copy;
     554             : 
     555           7 :         if (d->fd >= 0) {
     556           3 :                 if (ret)
     557           3 :                         *ret = d;
     558           3 :                 return 0;
     559             :         }
     560             : 
     561           4 :         d->fd = fd_move_above_stdio(r);
     562             : 
     563           4 :         ev = (struct epoll_event) {
     564             :                 .events = EPOLLIN,
     565             :                 .data.ptr = d,
     566             :         };
     567             : 
     568           4 :         r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev);
     569           4 :         if (r < 0)  {
     570           0 :                 r = -errno;
     571           0 :                 goto fail;
     572             :         }
     573             : 
     574           4 :         if (ret)
     575           3 :                 *ret = d;
     576             : 
     577           4 :         return 0;
     578             : 
     579           0 : fail:
     580           0 :         if (added)
     581           0 :                 event_free_signal_data(e, d);
     582             : 
     583           0 :         return r;
     584             : }
     585             : 
     586          10 : static void event_unmask_signal_data(sd_event *e, struct signal_data *d, int sig) {
     587          10 :         assert(e);
     588          10 :         assert(d);
     589             : 
      590             :         /* Turns off the specified signal in the signal data
      591             :          * object. If the signal mask of the object becomes empty
      592             :          * that way, the object is removed as well. */
     593             : 
     594          10 :         if (sigismember(&d->sigset, sig) == 0)
     595           4 :                 return;
     596             : 
     597           6 :         assert_se(sigdelset(&d->sigset, sig) >= 0);
     598             : 
     599           6 :         if (sigisemptyset(&d->sigset)) {
      600             :                 /* If the mask is all-zero we can get rid of the structure */
     601           4 :                 event_free_signal_data(e, d);
     602           4 :                 return;
     603             :         }
     604             : 
     605           2 :         assert(d->fd >= 0);
     606             : 
     607           2 :         if (signalfd(d->fd, &d->sigset, SFD_NONBLOCK|SFD_CLOEXEC) < 0)
     608           0 :                 log_debug_errno(errno, "Failed to unset signal bit, ignoring: %m");
     609             : }
     610             : 
     611           8 : static void event_gc_signal_data(sd_event *e, const int64_t *priority, int sig) {
     612             :         struct signal_data *d;
     613             :         static const int64_t zero_priority = 0;
     614             : 
     615           8 :         assert(e);
     616             : 
     617             :         /* Rechecks if the specified signal is still something we are
     618             :          * interested in. If not, we'll unmask it, and possibly drop
     619             :          * the signalfd for it. */
     620             : 
     621           8 :         if (sig == SIGCHLD &&
     622           2 :             e->n_enabled_child_sources > 0)
     623           0 :                 return;
     624             : 
     625           8 :         if (e->signal_sources &&
     626           8 :             e->signal_sources[sig] &&
     627           1 :             e->signal_sources[sig]->enabled != SD_EVENT_OFF)
     628           0 :                 return;
     629             : 
     630             :         /*
     631             :          * The specified signal might be enabled in three different queues:
     632             :          *
     633             :          * 1) the one that belongs to the priority passed (if it is non-NULL)
     634             :          * 2) the one that belongs to the priority of the event source of the signal (if there is one)
     635             :          * 3) the 0 priority (to cover the SIGCHLD case)
     636             :          *
     637             :          * Hence, let's remove it from all three here.
     638             :          */
     639             : 
     640           8 :         if (priority) {
     641           8 :                 d = hashmap_get(e->signal_data, priority);
     642           8 :                 if (d)
     643           6 :                         event_unmask_signal_data(e, d, sig);
     644             :         }
     645             : 
     646           8 :         if (e->signal_sources && e->signal_sources[sig]) {
     647           1 :                 d = hashmap_get(e->signal_data, &e->signal_sources[sig]->priority);
     648           1 :                 if (d)
     649           0 :                         event_unmask_signal_data(e, d, sig);
     650             :         }
     651             : 
     652           8 :         d = hashmap_get(e->signal_data, &zero_priority);
     653           8 :         if (d)
     654           3 :                 event_unmask_signal_data(e, d, sig);
     655             : }
     656             : 
     657        2625 : static void source_disconnect(sd_event_source *s) {
     658             :         sd_event *event;
     659             : 
     660        2625 :         assert(s);
     661             : 
     662        2625 :         if (!s->event)
     663         487 :                 return;
     664             : 
     665        2138 :         assert(s->event->n_sources > 0);
     666             : 
     667        2138 :         switch (s->type) {
     668             : 
     669         606 :         case SOURCE_IO:
     670         606 :                 if (s->io.fd >= 0)
     671         606 :                         source_io_unregister(s);
     672             : 
     673         606 :                 break;
     674             : 
     675         514 :         case SOURCE_TIME_REALTIME:
     676             :         case SOURCE_TIME_BOOTTIME:
     677             :         case SOURCE_TIME_MONOTONIC:
     678             :         case SOURCE_TIME_REALTIME_ALARM:
     679             :         case SOURCE_TIME_BOOTTIME_ALARM: {
     680             :                 struct clock_data *d;
     681             : 
     682         514 :                 d = event_get_clock_data(s->event, s->type);
     683         514 :                 assert(d);
     684             : 
     685         514 :                 prioq_remove(d->earliest, s, &s->time.earliest_index);
     686         514 :                 prioq_remove(d->latest, s, &s->time.latest_index);
     687         514 :                 d->needs_rearm = true;
     688         514 :                 break;
     689             :         }
     690             : 
     691           5 :         case SOURCE_SIGNAL:
     692           5 :                 if (s->signal.sig > 0) {
     693             : 
     694           5 :                         if (s->event->signal_sources)
     695           5 :                                 s->event->signal_sources[s->signal.sig] = NULL;
     696             : 
     697           5 :                         event_gc_signal_data(s->event, &s->priority, s->signal.sig);
     698             :                 }
     699             : 
     700           5 :                 break;
     701             : 
     702           1 :         case SOURCE_CHILD:
     703           1 :                 if (s->child.pid > 0) {
     704           1 :                         if (s->enabled != SD_EVENT_OFF) {
     705           0 :                                 assert(s->event->n_enabled_child_sources > 0);
     706           0 :                                 s->event->n_enabled_child_sources--;
     707             :                         }
     708             : 
     709           1 :                         (void) hashmap_remove(s->event->child_sources, PID_TO_PTR(s->child.pid));
     710           1 :                         event_gc_signal_data(s->event, &s->priority, SIGCHLD);
     711             :                 }
     712             : 
     713           1 :                 break;
     714             : 
     715         516 :         case SOURCE_DEFER:
     716             :                 /* nothing */
     717         516 :                 break;
     718             : 
     719           2 :         case SOURCE_POST:
     720           2 :                 set_remove(s->event->post_sources, s);
     721           2 :                 break;
     722             : 
     723         486 :         case SOURCE_EXIT:
     724         486 :                 prioq_remove(s->event->exit, s, &s->exit.prioq_index);
     725         486 :                 break;
     726             : 
     727           8 :         case SOURCE_INOTIFY: {
     728             :                 struct inode_data *inode_data;
     729             : 
     730           8 :                 inode_data = s->inotify.inode_data;
     731           8 :                 if (inode_data) {
     732             :                         struct inotify_data *inotify_data;
     733           8 :                         assert_se(inotify_data = inode_data->inotify_data);
     734             : 
     735             :                         /* Detach this event source from the inode object */
     736           8 :                         LIST_REMOVE(inotify.by_inode_data, inode_data->event_sources, s);
     737           8 :                         s->inotify.inode_data = NULL;
     738             : 
     739           8 :                         if (s->pending) {
     740           0 :                                 assert(inotify_data->n_pending > 0);
     741           0 :                                 inotify_data->n_pending--;
     742             :                         }
     743             : 
      744             :                         /* Note that we don't reduce the inotify mask for the watch descriptor here if the inode
      745             :                          * continues to be watched. That's because inotify doesn't really have an API for that: we
     746             :                          * can only change watch masks with access to the original inode either by fd or by path. But
     747             :                          * paths aren't stable, and keeping an O_PATH fd open all the time would mean wasting an fd
     748             :                          * continuously and keeping the mount busy which we can't really do. We could reconstruct the
     749             :                          * original inode from /proc/self/fdinfo/$INOTIFY_FD (as all watch descriptors are listed
     750             :                          * there), but given the need for open_by_handle_at() which is privileged and not universally
     751             :                          * available this would be quite an incomplete solution. Hence we go the other way, leave the
     752             :                          * mask set, even if it is not minimized now, and ignore all events we aren't interested in
     753             :                          * anymore after reception. Yes, this sucks, but … Linux … */
     754             : 
     755             :                         /* Maybe release the inode data (and its inotify) */
     756           8 :                         event_gc_inode_data(s->event, inode_data);
     757             :                 }
     758             : 
     759           8 :                 break;
     760             :         }
     761             : 
     762           0 :         default:
     763           0 :                 assert_not_reached("Wut? I shouldn't exist.");
     764             :         }
     765             : 
     766        2138 :         if (s->pending)
     767         517 :                 prioq_remove(s->event->pending, s, &s->pending_index);
     768             : 
     769        2138 :         if (s->prepare)
     770         490 :                 prioq_remove(s->event->prepare, s, &s->prepare_index);
     771             : 
     772        2138 :         event = s->event;
     773             : 
     774        2138 :         s->type = _SOURCE_EVENT_SOURCE_TYPE_INVALID;
     775        2138 :         s->event = NULL;
     776        2138 :         LIST_REMOVE(sources, event->sources, s);
     777        2138 :         event->n_sources--;
     778             : 
     779        2138 :         if (!s->floating)
     780        2135 :                 sd_event_unref(event);
     781             : }
     782             : 
     783        2138 : static void source_free(sd_event_source *s) {
     784        2138 :         assert(s);
     785             : 
     786        2138 :         source_disconnect(s);
     787             : 
     788        2138 :         if (s->type == SOURCE_IO && s->io.owned)
     789           0 :                 s->io.fd = safe_close(s->io.fd);
     790             : 
     791        2138 :         if (s->destroy_callback)
     792           0 :                 s->destroy_callback(s->userdata);
     793             : 
     794        2138 :         free(s->description);
     795        2138 :         free(s);
     796        2138 : }
     797        2143 : DEFINE_TRIVIAL_CLEANUP_FUNC(sd_event_source*, source_free);
     798             : 
     799      285699 : static int source_set_pending(sd_event_source *s, bool b) {
     800             :         int r;
     801             : 
     802      285699 :         assert(s);
     803      285699 :         assert(s->type != SOURCE_EXIT);
     804             : 
     805      285699 :         if (s->pending == b)
     806      183888 :                 return 0;
     807             : 
     808      101811 :         s->pending = b;
     809             : 
     810      101811 :         if (b) {
     811       51164 :                 s->pending_iteration = s->event->iteration;
     812             : 
     813       51164 :                 r = prioq_put(s->event->pending, s, &s->pending_index);
     814       51164 :                 if (r < 0) {
     815           0 :                         s->pending = false;
     816           0 :                         return r;
     817             :                 }
     818             :         } else
     819       50647 :                 assert_se(prioq_remove(s->event->pending, s, &s->pending_index));
     820             : 
     821      101811 :         if (EVENT_SOURCE_IS_TIME(s->type)) {
     822             :                 struct clock_data *d;
     823             : 
     824          76 :                 d = event_get_clock_data(s->event, s->type);
     825          76 :                 assert(d);
     826             : 
     827          76 :                 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
     828          76 :                 prioq_reshuffle(d->latest, s, &s->time.latest_index);
     829          76 :                 d->needs_rearm = true;
     830             :         }
     831             : 
     832      101811 :         if (s->type == SOURCE_SIGNAL && !b) {
     833             :                 struct signal_data *d;
     834             : 
     835           5 :                 d = hashmap_get(s->event->signal_data, &s->priority);
     836           5 :                 if (d && d->current == s)
     837           5 :                         d->current = NULL;
     838             :         }
     839             : 
     840      101811 :         if (s->type == SOURCE_INOTIFY) {
     841             : 
     842       98924 :                 assert(s->inotify.inode_data);
     843       98924 :                 assert(s->inotify.inode_data->inotify_data);
     844             : 
     845       98924 :                 if (b)
     846       49462 :                         s->inotify.inode_data->inotify_data->n_pending ++;
     847             :                 else {
     848       49462 :                         assert(s->inotify.inode_data->inotify_data->n_pending > 0);
     849       49462 :                         s->inotify.inode_data->inotify_data->n_pending --;
     850             :                 }
     851             :         }
     852             : 
     853      101811 :         return 0;
     854             : }
     855             : 
     856        2139 : static sd_event_source *source_new(sd_event *e, bool floating, EventSourceType type) {
     857             :         sd_event_source *s;
     858             : 
     859        2139 :         assert(e);
     860             : 
     861        2139 :         s = new(sd_event_source, 1);
     862        2139 :         if (!s)
     863           0 :                 return NULL;
     864             : 
     865        2139 :         *s = (struct sd_event_source) {
     866             :                 .n_ref = 1,
     867             :                 .event = e,
     868             :                 .floating = floating,
     869             :                 .type = type,
     870             :                 .pending_index = PRIOQ_IDX_NULL,
     871             :                 .prepare_index = PRIOQ_IDX_NULL,
     872             :         };
     873             : 
     874        2139 :         if (!floating)
     875        2136 :                 sd_event_ref(e);
     876             : 
     877        2139 :         LIST_PREPEND(sources, e->sources, s);
     878        2139 :         e->n_sources++;
     879             : 
     880        2139 :         return s;
     881             : }
     882             : 
     883         607 : _public_ int sd_event_add_io(
     884             :                 sd_event *e,
     885             :                 sd_event_source **ret,
     886             :                 int fd,
     887             :                 uint32_t events,
     888             :                 sd_event_io_handler_t callback,
     889             :                 void *userdata) {
     890             : 
     891         607 :         _cleanup_(source_freep) sd_event_source *s = NULL;
     892             :         int r;
     893             : 
     894         607 :         assert_return(e, -EINVAL);
     895         607 :         assert_return(e = event_resolve(e), -ENOPKG);
     896         607 :         assert_return(fd >= 0, -EBADF);
     897         607 :         assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
     898         607 :         assert_return(callback, -EINVAL);
     899         607 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
     900         607 :         assert_return(!event_pid_changed(e), -ECHILD);
     901             : 
     902         607 :         s = source_new(e, !ret, SOURCE_IO);
     903         607 :         if (!s)
     904           0 :                 return -ENOMEM;
     905             : 
     906         607 :         s->wakeup = WAKEUP_EVENT_SOURCE;
     907         607 :         s->io.fd = fd;
     908         607 :         s->io.events = events;
     909         607 :         s->io.callback = callback;
     910         607 :         s->userdata = userdata;
     911         607 :         s->enabled = SD_EVENT_ON;
     912             : 
     913         607 :         r = source_io_register(s, s->enabled, events);
     914         607 :         if (r < 0)
     915           0 :                 return r;
     916             : 
     917         607 :         if (ret)
     918         607 :                 *ret = s;
     919         607 :         TAKE_PTR(s);
     920             : 
     921         607 :         return 0;
     922             : }
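
/* A hedged usage sketch for sd_event_add_io() above; on_readable() and
 * watch_fd() are illustrative names, not part of the covered source. */
#include <sys/epoll.h>
#include <systemd/sd-event.h>

static int on_readable(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        /* handle the I/O on fd here */
        return 0;
}

static int watch_fd(sd_event *e, int fd) {
        sd_event_source *source = NULL;

        /* the new source starts out enabled as SD_EVENT_ON, as set above */
        return sd_event_add_io(e, &source, fd, EPOLLIN, on_readable, NULL);
}
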
     923             : 
     924          52 : static void initialize_perturb(sd_event *e) {
     925          52 :         sd_id128_t bootid = {};
     926             : 
     927             :         /* When we sleep for longer, we try to realign the wakeup to
     928             :            the same time within each minute/second/250ms, so that
     929             :            events all across the system can be coalesced into a single
     930             :            CPU wakeup. However, let's take some system-specific
     931             :            randomness for this value, so that in a network of systems
     932             :            with synced clocks timer events are distributed a
     933             :            bit. Here, we calculate a perturbation usec offset from the
     934             :            boot ID. */
     935             : 
     936          52 :         if (_likely_(e->perturb != USEC_INFINITY))
     937          34 :                 return;
     938             : 
     939          18 :         if (sd_id128_get_boot(&bootid) >= 0)
     940          18 :                 e->perturb = (bootid.qwords[0] ^ bootid.qwords[1]) % USEC_PER_MINUTE;
     941             : }
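
/* A hedged sketch of the coalescing arithmetic described above: a deadline far
 * in the future can be shifted onto the per-boot "perturb" offset within its
 * minute, so timers on one machine fire together while machines with different
 * boot IDs stay spread out. align_to_perturb() is illustrative only; the real
 * arming logic lives elsewhere in sd-event.c and also honours each source's
 * accuracy window. */
static usec_t align_to_perturb(sd_event *e, usec_t deadline) {
        usec_t phase;

        initialize_perturb(e);

        phase = deadline % USEC_PER_MINUTE;
        if (phase <= e->perturb)
                return deadline - phase + e->perturb;

        /* otherwise move on to the perturb point of the next minute */
        return deadline - phase + USEC_PER_MINUTE + e->perturb;
}
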
     942             : 
     943          30 : static int event_setup_timer_fd(
     944             :                 sd_event *e,
     945             :                 struct clock_data *d,
     946             :                 clockid_t clock) {
     947             : 
     948             :         struct epoll_event ev;
     949             :         int r, fd;
     950             : 
     951          30 :         assert(e);
     952          30 :         assert(d);
     953             : 
     954          30 :         if (_likely_(d->fd >= 0))
     955           0 :                 return 0;
     956             : 
     957          30 :         fd = timerfd_create(clock, TFD_NONBLOCK|TFD_CLOEXEC);
     958          30 :         if (fd < 0)
     959           0 :                 return -errno;
     960             : 
     961          30 :         fd = fd_move_above_stdio(fd);
     962             : 
     963          30 :         ev = (struct epoll_event) {
     964             :                 .events = EPOLLIN,
     965             :                 .data.ptr = d,
     966             :         };
     967             : 
     968          30 :         r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, fd, &ev);
     969          30 :         if (r < 0) {
     970           0 :                 safe_close(fd);
     971           0 :                 return -errno;
     972             :         }
     973             : 
     974          30 :         d->fd = fd;
     975          30 :         return 0;
     976             : }
     977             : 
     978           0 : static int time_exit_callback(sd_event_source *s, uint64_t usec, void *userdata) {
     979           0 :         assert(s);
     980             : 
     981           0 :         return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
     982             : }
     983             : 
     984         514 : _public_ int sd_event_add_time(
     985             :                 sd_event *e,
     986             :                 sd_event_source **ret,
     987             :                 clockid_t clock,
     988             :                 uint64_t usec,
     989             :                 uint64_t accuracy,
     990             :                 sd_event_time_handler_t callback,
     991             :                 void *userdata) {
     992             : 
     993             :         EventSourceType type;
     994         514 :         _cleanup_(source_freep) sd_event_source *s = NULL;
     995             :         struct clock_data *d;
     996             :         int r;
     997             : 
     998         514 :         assert_return(e, -EINVAL);
     999         514 :         assert_return(e = event_resolve(e), -ENOPKG);
    1000         514 :         assert_return(accuracy != (uint64_t) -1, -EINVAL);
    1001         514 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1002         514 :         assert_return(!event_pid_changed(e), -ECHILD);
    1003             : 
    1004         514 :         if (!clock_supported(clock)) /* Checks whether the kernel supports the clock */
    1005           0 :                 return -EOPNOTSUPP;
    1006             : 
    1007         514 :         type = clock_to_event_source_type(clock); /* checks whether sd-event supports this clock */
    1008         514 :         if (type < 0)
    1009           0 :                 return -EOPNOTSUPP;
    1010             : 
    1011         514 :         if (!callback)
    1012           0 :                 callback = time_exit_callback;
    1013             : 
    1014         514 :         d = event_get_clock_data(e, type);
    1015         514 :         assert(d);
    1016             : 
    1017         514 :         r = prioq_ensure_allocated(&d->earliest, earliest_time_prioq_compare);
    1018         514 :         if (r < 0)
    1019           0 :                 return r;
    1020             : 
    1021         514 :         r = prioq_ensure_allocated(&d->latest, latest_time_prioq_compare);
    1022         514 :         if (r < 0)
    1023           0 :                 return r;
    1024             : 
    1025         514 :         if (d->fd < 0) {
    1026          30 :                 r = event_setup_timer_fd(e, d, clock);
    1027          30 :                 if (r < 0)
    1028           0 :                         return r;
    1029             :         }
    1030             : 
    1031         514 :         s = source_new(e, !ret, type);
    1032         514 :         if (!s)
    1033           0 :                 return -ENOMEM;
    1034             : 
    1035         514 :         s->time.next = usec;
    1036         514 :         s->time.accuracy = accuracy == 0 ? DEFAULT_ACCURACY_USEC : accuracy;
    1037         514 :         s->time.callback = callback;
    1038         514 :         s->time.earliest_index = s->time.latest_index = PRIOQ_IDX_NULL;
    1039         514 :         s->userdata = userdata;
    1040         514 :         s->enabled = SD_EVENT_ONESHOT;
    1041             : 
    1042         514 :         d->needs_rearm = true;
    1043             : 
    1044         514 :         r = prioq_put(d->earliest, s, &s->time.earliest_index);
    1045         514 :         if (r < 0)
    1046           0 :                 return r;
    1047             : 
    1048         514 :         r = prioq_put(d->latest, s, &s->time.latest_index);
    1049         514 :         if (r < 0)
    1050           0 :                 return r;
    1051             : 
    1052         514 :         if (ret)
    1053         514 :                 *ret = s;
    1054         514 :         TAKE_PTR(s);
    1055             : 
    1056         514 :         return 0;
    1057             : }
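
/* A hedged usage sketch for sd_event_add_time() above: arm a one-shot timer one
 * second from now on CLOCK_MONOTONIC with the default accuracy (0). on_timer()
 * and arm_timer() are illustrative names; sd_event_now() is the public helper
 * from sd-event.h for fetching the loop's current timestamp. */
#include <time.h>
#include <systemd/sd-event.h>

static int on_timer(sd_event_source *s, uint64_t usec, void *userdata) {
        return 0;
}

static int arm_timer(sd_event *e) {
        uint64_t now_usec;
        int r;

        r = sd_event_now(e, CLOCK_MONOTONIC, &now_usec);
        if (r < 0)
                return r;

        /* passing NULL for ret makes the source "floating", owned by the loop */
        return sd_event_add_time(e, NULL, CLOCK_MONOTONIC,
                                 now_usec + 1000000 /* 1s */, 0, on_timer, NULL);
}
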
    1058             : 
    1059           0 : static int signal_exit_callback(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
    1060           0 :         assert(s);
    1061             : 
    1062           0 :         return sd_event_exit(sd_event_source_get_event(s), PTR_TO_INT(userdata));
    1063             : }
    1064             : 
    1065           9 : _public_ int sd_event_add_signal(
    1066             :                 sd_event *e,
    1067             :                 sd_event_source **ret,
    1068             :                 int sig,
    1069             :                 sd_event_signal_handler_t callback,
    1070             :                 void *userdata) {
    1071             : 
    1072           9 :         _cleanup_(source_freep) sd_event_source *s = NULL;
    1073             :         struct signal_data *d;
    1074             :         sigset_t ss;
    1075             :         int r;
    1076             : 
    1077           9 :         assert_return(e, -EINVAL);
    1078           9 :         assert_return(e = event_resolve(e), -ENOPKG);
    1079           9 :         assert_return(SIGNAL_VALID(sig), -EINVAL);
    1080           9 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1081           9 :         assert_return(!event_pid_changed(e), -ECHILD);
    1082             : 
    1083           9 :         if (!callback)
    1084           5 :                 callback = signal_exit_callback;
    1085             : 
    1086           9 :         r = pthread_sigmask(SIG_SETMASK, NULL, &ss);
    1087           9 :         if (r != 0)
    1088           0 :                 return -r;
    1089             : 
    1090           9 :         if (!sigismember(&ss, sig))
    1091           4 :                 return -EBUSY;
    1092             : 
    1093           5 :         if (!e->signal_sources) {
    1094           2 :                 e->signal_sources = new0(sd_event_source*, _NSIG);
    1095           2 :                 if (!e->signal_sources)
    1096           0 :                         return -ENOMEM;
    1097           3 :         } else if (e->signal_sources[sig])
    1098           0 :                 return -EBUSY;
    1099             : 
    1100           5 :         s = source_new(e, !ret, SOURCE_SIGNAL);
    1101           5 :         if (!s)
    1102           0 :                 return -ENOMEM;
    1103             : 
    1104           5 :         s->signal.sig = sig;
    1105           5 :         s->signal.callback = callback;
    1106           5 :         s->userdata = userdata;
    1107           5 :         s->enabled = SD_EVENT_ON;
    1108             : 
    1109           5 :         e->signal_sources[sig] = s;
    1110             : 
    1111           5 :         r = event_make_signal_data(e, sig, &d);
    1112           5 :         if (r < 0)
    1113           0 :                 return r;
    1114             : 
    1115             :         /* Use the signal name as description for the event source by default */
    1116           5 :         (void) sd_event_source_set_description(s, signal_to_string(sig));
    1117             : 
    1118           5 :         if (ret)
    1119           4 :                 *ret = s;
    1120           5 :         TAKE_PTR(s);
    1121             : 
    1122           5 :         return 0;
    1123             : }
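                     :
                     :         /* Editor's note: an illustrative sketch, not part of sd-event.c and not compiled in. As the sigismember()
                     :          * check above implies, the signal has to be blocked in the calling thread already, otherwise -EBUSY is
                     :          * returned; and a NULL callback falls back to signal_exit_callback(), i.e. the signal simply stops the
                     :          * loop with the userdata pointer as exit code. */
                     : #if 0
                     : #include <errno.h>
                     : #include <signal.h>
                     : #include <systemd/sd-event.h>
                     :
                     : static int watch_sigterm(sd_event *e) {
                     :         sigset_t ss;
                     :
                     :         sigemptyset(&ss);
                     :         sigaddset(&ss, SIGTERM);
                     :         if (sigprocmask(SIG_BLOCK, &ss, NULL) < 0)
                     :                 return -errno;
                     :
                     :         /* NULL callback: exit the event loop cleanly when SIGTERM arrives */
                     :         return sd_event_add_signal(e, NULL, SIGTERM, NULL, NULL);
                     : }
                     : #endif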
    1124             : 
    1125           1 : _public_ int sd_event_add_child(
    1126             :                 sd_event *e,
    1127             :                 sd_event_source **ret,
    1128             :                 pid_t pid,
    1129             :                 int options,
    1130             :                 sd_event_child_handler_t callback,
    1131             :                 void *userdata) {
    1132             : 
    1133           1 :         _cleanup_(source_freep) sd_event_source *s = NULL;
    1134             :         int r;
    1135             : 
    1136           1 :         assert_return(e, -EINVAL);
    1137           1 :         assert_return(e = event_resolve(e), -ENOPKG);
    1138           1 :         assert_return(pid > 1, -EINVAL);
    1139           1 :         assert_return(!(options & ~(WEXITED|WSTOPPED|WCONTINUED)), -EINVAL);
    1140           1 :         assert_return(options != 0, -EINVAL);
    1141           1 :         assert_return(callback, -EINVAL);
    1142           1 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1143           1 :         assert_return(!event_pid_changed(e), -ECHILD);
    1144             : 
    1145           1 :         r = hashmap_ensure_allocated(&e->child_sources, NULL);
    1146           1 :         if (r < 0)
    1147           0 :                 return r;
    1148             : 
    1149           1 :         if (hashmap_contains(e->child_sources, PID_TO_PTR(pid)))
    1150           0 :                 return -EBUSY;
    1151             : 
    1152           1 :         s = source_new(e, !ret, SOURCE_CHILD);
    1153           1 :         if (!s)
    1154           0 :                 return -ENOMEM;
    1155             : 
    1156           1 :         s->child.pid = pid;
    1157           1 :         s->child.options = options;
    1158           1 :         s->child.callback = callback;
    1159           1 :         s->userdata = userdata;
    1160           1 :         s->enabled = SD_EVENT_ONESHOT;
    1161             : 
    1162           1 :         r = hashmap_put(e->child_sources, PID_TO_PTR(pid), s);
    1163           1 :         if (r < 0)
    1164           0 :                 return r;
    1165             : 
    1166           1 :         e->n_enabled_child_sources++;
    1167             : 
    1168           1 :         r = event_make_signal_data(e, SIGCHLD, NULL);
    1169           1 :         if (r < 0) {
    1170           0 :                 e->n_enabled_child_sources--;
    1171           0 :                 return r;
    1172             :         }
    1173             : 
    1174           1 :         e->need_process_child = true;
    1175             : 
    1176           1 :         if (ret)
    1177           1 :                 *ret = s;
    1178           1 :         TAKE_PTR(s);
    1179             : 
    1180           1 :         return 0;
    1181             : }
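                     :
                     :         /* Editor's note: an illustrative sketch, not part of sd-event.c and not compiled in. Child sources are
                     :          * serviced through the SIGCHLD signal data set up via event_make_signal_data() above, so the caller is
                     :          * expected to keep SIGCHLD blocked, just as for plain signal sources. */
                     : #if 0
                     : #include <errno.h>
                     : #include <signal.h>
                     : #include <sys/types.h>
                     : #include <sys/wait.h>
                     : #include <systemd/sd-event.h>
                     :
                     : static int on_child_exit(sd_event_source *s, const siginfo_t *si, void *userdata) {
                     :         /* when si->si_code == CLD_EXITED, si->si_status holds the child's exit code */
                     :         return sd_event_exit(sd_event_source_get_event(s), 0);
                     : }
                     :
                     : static int watch_child(sd_event *e, pid_t pid) {
                     :         sigset_t ss;
                     :
                     :         sigemptyset(&ss);
                     :         sigaddset(&ss, SIGCHLD);
                     :         if (sigprocmask(SIG_BLOCK, &ss, NULL) < 0)
                     :                 return -errno;
                     :
                     :         /* WEXITED: dispatch once when the child terminates (the source is one-shot, see above) */
                     :         return sd_event_add_child(e, NULL, pid, WEXITED, on_child_exit, NULL);
                     : }
                     : #endif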
    1182             : 
    1183         516 : _public_ int sd_event_add_defer(
    1184             :                 sd_event *e,
    1185             :                 sd_event_source **ret,
    1186             :                 sd_event_handler_t callback,
    1187             :                 void *userdata) {
    1188             : 
    1189         516 :         _cleanup_(source_freep) sd_event_source *s = NULL;
    1190             :         int r;
    1191             : 
    1192         516 :         assert_return(e, -EINVAL);
    1193         516 :         assert_return(e = event_resolve(e), -ENOPKG);
    1194         516 :         assert_return(callback, -EINVAL);
    1195         516 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1196         516 :         assert_return(!event_pid_changed(e), -ECHILD);
    1197             : 
    1198         516 :         s = source_new(e, !ret, SOURCE_DEFER);
    1199         516 :         if (!s)
    1200           0 :                 return -ENOMEM;
    1201             : 
    1202         516 :         s->defer.callback = callback;
    1203         516 :         s->userdata = userdata;
    1204         516 :         s->enabled = SD_EVENT_ONESHOT;
    1205             : 
    1206         516 :         r = source_set_pending(s, true);
    1207         516 :         if (r < 0)
    1208           0 :                 return r;
    1209             : 
    1210         516 :         if (ret)
    1211         516 :                 *ret = s;
    1212         516 :         TAKE_PTR(s);
    1213             : 
    1214         516 :         return 0;
    1215             : }
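                     :
                     :         /* Editor's note: an illustrative sketch, not part of sd-event.c and not compiled in. A defer source is
                     :          * created already pending and SD_EVENT_ONESHOT (see above), so the callback runs once on the next loop
                     :          * iteration; re-enable it from within the callback if it should run again. */
                     : #if 0
                     : #include <systemd/sd-event.h>
                     :
                     : static int do_deferred_work(sd_event_source *s, void *userdata) {
                     :         /* runs once, shortly after being added */
                     :         return 0;
                     : }
                     :
                     : static int queue_deferred_work(sd_event *e) {
                     :         return sd_event_add_defer(e, NULL, do_deferred_work, NULL);
                     : }
                     : #endif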
    1216             : 
    1217           2 : _public_ int sd_event_add_post(
    1218             :                 sd_event *e,
    1219             :                 sd_event_source **ret,
    1220             :                 sd_event_handler_t callback,
    1221             :                 void *userdata) {
    1222             : 
    1223           2 :         _cleanup_(source_freep) sd_event_source *s = NULL;
    1224             :         int r;
    1225             : 
    1226           2 :         assert_return(e, -EINVAL);
    1227           2 :         assert_return(e = event_resolve(e), -ENOPKG);
    1228           2 :         assert_return(callback, -EINVAL);
    1229           2 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1230           2 :         assert_return(!event_pid_changed(e), -ECHILD);
    1231             : 
    1232           2 :         r = set_ensure_allocated(&e->post_sources, NULL);
    1233           2 :         if (r < 0)
    1234           0 :                 return r;
    1235             : 
    1236           2 :         s = source_new(e, !ret, SOURCE_POST);
    1237           2 :         if (!s)
    1238           0 :                 return -ENOMEM;
    1239             : 
    1240           2 :         s->post.callback = callback;
    1241           2 :         s->userdata = userdata;
    1242           2 :         s->enabled = SD_EVENT_ON;
    1243             : 
    1244           2 :         r = set_put(e->post_sources, s);
    1245           2 :         if (r < 0)
    1246           0 :                 return r;
    1247             : 
    1248           2 :         if (ret)
    1249           0 :                 *ret = s;
    1250           2 :         TAKE_PTR(s);
    1251             : 
    1252           2 :         return 0;
    1253             : }
    1254             : 
    1255         486 : _public_ int sd_event_add_exit(
    1256             :                 sd_event *e,
    1257             :                 sd_event_source **ret,
    1258             :                 sd_event_handler_t callback,
    1259             :                 void *userdata) {
    1260             : 
    1261         486 :         _cleanup_(source_freep) sd_event_source *s = NULL;
    1262             :         int r;
    1263             : 
    1264         486 :         assert_return(e, -EINVAL);
    1265         486 :         assert_return(e = event_resolve(e), -ENOPKG);
    1266         486 :         assert_return(callback, -EINVAL);
    1267         486 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1268         486 :         assert_return(!event_pid_changed(e), -ECHILD);
    1269             : 
    1270         486 :         r = prioq_ensure_allocated(&e->exit, exit_prioq_compare);
    1271         486 :         if (r < 0)
    1272           0 :                 return r;
    1273             : 
    1274         486 :         s = source_new(e, !ret, SOURCE_EXIT);
    1275         486 :         if (!s)
    1276           0 :                 return -ENOMEM;
    1277             : 
    1278         486 :         s->exit.callback = callback;
    1279         486 :         s->userdata = userdata;
    1280         486 :         s->exit.prioq_index = PRIOQ_IDX_NULL;
    1281         486 :         s->enabled = SD_EVENT_ONESHOT;
    1282             : 
    1283         486 :         r = prioq_put(s->event->exit, s, &s->exit.prioq_index);
    1284         486 :         if (r < 0)
    1285           0 :                 return r;
    1286             : 
    1287         486 :         if (ret)
    1288         486 :                 *ret = s;
    1289         486 :         TAKE_PTR(s);
    1290             : 
    1291         486 :         return 0;
    1292             : }
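                     :
                     :         /* Editor's note: an illustrative sketch, not part of sd-event.c and not compiled in. Exit sources live in
                     :          * the exit priority queue filled above and are dispatched only while the loop is shutting down, i.e. after
                     :          * sd_event_exit() was called, which makes them a convenient place for cleanup work. */
                     : #if 0
                     : #include <systemd/sd-event.h>
                     :
                     : static int on_shutdown(sd_event_source *s, void *userdata) {
                     :         /* release resources referenced by userdata here */
                     :         return 0;
                     : }
                     :
                     : static int register_cleanup(sd_event *e) {
                     :         return sd_event_add_exit(e, NULL, on_shutdown, NULL);
                     : }
                     : #endif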
    1293             : 
    1294           6 : static void event_free_inotify_data(sd_event *e, struct inotify_data *d) {
    1295           6 :         assert(e);
    1296             : 
    1297           6 :         if (!d)
    1298           0 :                 return;
    1299             : 
    1300           6 :         assert(hashmap_isempty(d->inodes));
    1301           6 :         assert(hashmap_isempty(d->wd));
    1302             : 
    1303           6 :         if (d->buffer_filled > 0)
    1304           1 :                 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
    1305             : 
    1306           6 :         hashmap_free(d->inodes);
    1307           6 :         hashmap_free(d->wd);
    1308             : 
    1309           6 :         assert_se(hashmap_remove(e->inotify_data, &d->priority) == d);
    1310             : 
    1311           6 :         if (d->fd >= 0) {
    1312           6 :                 if (epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, d->fd, NULL) < 0)
    1313           0 :                         log_debug_errno(errno, "Failed to remove inotify fd from epoll, ignoring: %m");
    1314             : 
    1315           6 :                 safe_close(d->fd);
    1316             :         }
    1317           6 :         free(d);
    1318             : }
    1319             : 
    1320          14 : static int event_make_inotify_data(
    1321             :                 sd_event *e,
    1322             :                 int64_t priority,
    1323             :                 struct inotify_data **ret) {
    1324             : 
    1325          14 :         _cleanup_close_ int fd = -1;
    1326             :         struct inotify_data *d;
    1327             :         struct epoll_event ev;
    1328             :         int r;
    1329             : 
    1330          14 :         assert(e);
    1331             : 
    1332          14 :         d = hashmap_get(e->inotify_data, &priority);
    1333          14 :         if (d) {
    1334           8 :                 if (ret)
    1335           8 :                         *ret = d;
    1336           8 :                 return 0;
    1337             :         }
    1338             : 
    1339           6 :         fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
    1340           6 :         if (fd < 0)
    1341           0 :                 return -errno;
    1342             : 
    1343           6 :         fd = fd_move_above_stdio(fd);
    1344             : 
    1345           6 :         r = hashmap_ensure_allocated(&e->inotify_data, &uint64_hash_ops);
    1346           6 :         if (r < 0)
    1347           0 :                 return r;
    1348             : 
    1349           6 :         d = new(struct inotify_data, 1);
    1350           6 :         if (!d)
    1351           0 :                 return -ENOMEM;
    1352             : 
    1353           6 :         *d = (struct inotify_data) {
    1354             :                 .wakeup = WAKEUP_INOTIFY_DATA,
    1355           6 :                 .fd = TAKE_FD(fd),
    1356             :                 .priority = priority,
    1357             :         };
    1358             : 
    1359           6 :         r = hashmap_put(e->inotify_data, &d->priority, d);
    1360           6 :         if (r < 0) {
    1361           0 :                 d->fd = safe_close(d->fd);
    1362           0 :                 free(d);
    1363           0 :                 return r;
    1364             :         }
    1365             : 
    1366           6 :         ev = (struct epoll_event) {
    1367             :                 .events = EPOLLIN,
    1368             :                 .data.ptr = d,
    1369             :         };
    1370             : 
    1371           6 :         if (epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, d->fd, &ev) < 0) {
    1372           0 :                 r = -errno;
    1373           0 :                 d->fd = safe_close(d->fd); /* let's close this ourselves, as event_free_inotify_data() would otherwise
    1374             :                                             * remove the fd from the epoll first, which we don't want as we couldn't
    1375             :                                             * add it in the first place. */
    1376           0 :                 event_free_inotify_data(e, d);
    1377           0 :                 return r;
    1378             :         }
    1379             : 
    1380           6 :         if (ret)
    1381           6 :                 *ret = d;
    1382             : 
    1383           6 :         return 1;
    1384             : }
    1385             : 
    1386          18 : static int inode_data_compare(const struct inode_data *x, const struct inode_data *y) {
    1387             :         int r;
    1388             : 
    1389          18 :         assert(x);
    1390          18 :         assert(y);
    1391             : 
    1392          18 :         r = CMP(x->dev, y->dev);
    1393          18 :         if (r != 0)
    1394           0 :                 return r;
    1395             : 
    1396          18 :         return CMP(x->ino, y->ino);
    1397             : }
    1398             : 
    1399          24 : static void inode_data_hash_func(const struct inode_data *d, struct siphash *state) {
    1400          24 :         assert(d);
    1401             : 
    1402          24 :         siphash24_compress(&d->dev, sizeof(d->dev), state);
    1403          24 :         siphash24_compress(&d->ino, sizeof(d->ino), state);
    1404          24 : }
    1405             : 
    1406             : DEFINE_PRIVATE_HASH_OPS(inode_data_hash_ops, struct inode_data, inode_data_hash_func, inode_data_compare);
    1407             : 
    1408           8 : static void event_free_inode_data(
    1409             :                 sd_event *e,
    1410             :                 struct inode_data *d) {
    1411             : 
    1412           8 :         assert(e);
    1413             : 
    1414           8 :         if (!d)
    1415           0 :                 return;
    1416             : 
    1417           8 :         assert(!d->event_sources);
    1418             : 
    1419           8 :         if (d->fd >= 0) {
    1420           2 :                 LIST_REMOVE(to_close, e->inode_data_to_close, d);
    1421           2 :                 safe_close(d->fd);
    1422             :         }
    1423             : 
    1424           8 :         if (d->inotify_data) {
    1425             : 
    1426           8 :                 if (d->wd >= 0) {
    1427           7 :                         if (d->inotify_data->fd >= 0) {
    1428             :                                 /* So here's a problem. At the time this runs the watch descriptor might already be
    1429             :                                  * invalidated, because an IN_IGNORED event might be queued right the moment we enter
    1430             :                                  * the syscall. Hence, whenever we get EINVAL, ignore it entirely, since it's a very
    1431             :                                  * likely case to happen. */
    1432             : 
    1433           7 :                                 if (inotify_rm_watch(d->inotify_data->fd, d->wd) < 0 && errno != EINVAL)
    1434           0 :                                         log_debug_errno(errno, "Failed to remove watch descriptor %i from inotify, ignoring: %m", d->wd);
    1435             :                         }
    1436             : 
    1437           7 :                         assert_se(hashmap_remove(d->inotify_data->wd, INT_TO_PTR(d->wd)) == d);
    1438             :                 }
    1439             : 
    1440           8 :                 assert_se(hashmap_remove(d->inotify_data->inodes, d) == d);
    1441             :         }
    1442             : 
    1443           8 :         free(d);
    1444             : }
    1445             : 
    1446          14 : static void event_gc_inode_data(
    1447             :                 sd_event *e,
    1448             :                 struct inode_data *d) {
    1449             : 
    1450             :         struct inotify_data *inotify_data;
    1451             : 
    1452          14 :         assert(e);
    1453             : 
    1454          14 :         if (!d)
    1455           0 :                 return;
    1456             : 
    1457          14 :         if (d->event_sources)
    1458           6 :                 return;
    1459             : 
    1460           8 :         inotify_data = d->inotify_data;
    1461           8 :         event_free_inode_data(e, d);
    1462             : 
    1463           8 :         if (inotify_data && hashmap_isempty(inotify_data->inodes))
    1464           6 :                 event_free_inotify_data(e, inotify_data);
    1465             : }
    1466             : 
    1467          14 : static int event_make_inode_data(
    1468             :                 sd_event *e,
    1469             :                 struct inotify_data *inotify_data,
    1470             :                 dev_t dev,
    1471             :                 ino_t ino,
    1472             :                 struct inode_data **ret) {
    1473             : 
    1474             :         struct inode_data *d, key;
    1475             :         int r;
    1476             : 
    1477          14 :         assert(e);
    1478          14 :         assert(inotify_data);
    1479             : 
    1480          14 :         key = (struct inode_data) {
    1481             :                 .ino = ino,
    1482             :                 .dev = dev,
    1483             :         };
    1484             : 
    1485          14 :         d = hashmap_get(inotify_data->inodes, &key);
    1486          14 :         if (d) {
    1487           6 :                 if (ret)
    1488           6 :                         *ret = d;
    1489             : 
    1490           6 :                 return 0;
    1491             :         }
    1492             : 
    1493           8 :         r = hashmap_ensure_allocated(&inotify_data->inodes, &inode_data_hash_ops);
    1494           8 :         if (r < 0)
    1495           0 :                 return r;
    1496             : 
    1497           8 :         d = new(struct inode_data, 1);
    1498           8 :         if (!d)
    1499           0 :                 return -ENOMEM;
    1500             : 
    1501           8 :         *d = (struct inode_data) {
    1502             :                 .dev = dev,
    1503             :                 .ino = ino,
    1504             :                 .wd = -1,
    1505             :                 .fd = -1,
    1506             :                 .inotify_data = inotify_data,
    1507             :         };
    1508             : 
    1509           8 :         r = hashmap_put(inotify_data->inodes, d, d);
    1510           8 :         if (r < 0) {
    1511           0 :                 free(d);
    1512           0 :                 return r;
    1513             :         }
    1514             : 
    1515           8 :         if (ret)
    1516           8 :                 *ret = d;
    1517             : 
    1518           8 :         return 1;
    1519             : }
    1520             : 
    1521          14 : static uint32_t inode_data_determine_mask(struct inode_data *d) {
    1522          14 :         bool excl_unlink = true;
    1523          14 :         uint32_t combined = 0;
    1524             :         sd_event_source *s;
    1525             : 
    1526          14 :         assert(d);
    1527             : 
    1528             :         /* Combines the watch masks of all event sources watching this inode. We generally just OR them together, but
    1529             :          * the IN_EXCL_UNLINK flag is ANDed instead.
    1530             :          *
    1531             :          * Note that we add all sources to the mask here, regardless of whether they are enabled, disabled or oneshot.
    1532             :          * That's because we cannot change the mask anymore once the event source has been created, since the kernel
    1533             :          * has no API for that. Hence we need to subscribe to the maximum mask we might ever be interested in, and
    1534             :          * suppress the events we don't care about on the client side. */
    1535             : 
    1536          36 :         LIST_FOREACH(inotify.by_inode_data, s, d->event_sources) {
    1537             : 
    1538          22 :                 if ((s->inotify.mask & IN_EXCL_UNLINK) == 0)
    1539          18 :                         excl_unlink = false;
    1540             : 
    1541          22 :                 combined |= s->inotify.mask;
    1542             :         }
    1543             : 
    1544          14 :         return (combined & ~(IN_ONESHOT|IN_DONT_FOLLOW|IN_ONLYDIR|IN_EXCL_UNLINK)) | (excl_unlink ? IN_EXCL_UNLINK : 0);
    1545             : }
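                     :
                     :         /* Editor's note, a worked example of the rule above (not part of sd-event.c): with two sources on the
                     :          * same inode, one asking for IN_CREATE|IN_EXCL_UNLINK and one for IN_DELETE, the realized kernel mask is
                     :          * IN_CREATE|IN_DELETE. IN_EXCL_UNLINK is dropped because not all sources requested it, while IN_ONESHOT,
                     :          * IN_DONT_FOLLOW and IN_ONLYDIR are always stripped from the combined mask and handled per source instead
                     :          * (via the enablement state and the flags of the O_PATH open() in sd_event_add_inotify() below). */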
    1546             : 
    1547          14 : static int inode_data_realize_watch(sd_event *e, struct inode_data *d) {
    1548             :         uint32_t combined_mask;
    1549             :         int wd, r;
    1550             : 
    1551          14 :         assert(d);
    1552          14 :         assert(d->fd >= 0);
    1553             : 
    1554          14 :         combined_mask = inode_data_determine_mask(d);
    1555             : 
    1556          14 :         if (d->wd >= 0 && combined_mask == d->combined_mask)
    1557           4 :                 return 0;
    1558             : 
    1559          10 :         r = hashmap_ensure_allocated(&d->inotify_data->wd, NULL);
    1560          10 :         if (r < 0)
    1561           0 :                 return r;
    1562             : 
    1563          10 :         wd = inotify_add_watch_fd(d->inotify_data->fd, d->fd, combined_mask);
    1564          10 :         if (wd < 0)
    1565           0 :                 return -errno;
    1566             : 
    1567          10 :         if (d->wd < 0) {
    1568           8 :                 r = hashmap_put(d->inotify_data->wd, INT_TO_PTR(wd), d);
    1569           8 :                 if (r < 0) {
    1570           0 :                         (void) inotify_rm_watch(d->inotify_data->fd, wd);
    1571           0 :                         return r;
    1572             :                 }
    1573             : 
    1574           8 :                 d->wd = wd;
    1575             : 
    1576           2 :         } else if (d->wd != wd) {
    1577             : 
    1578           0 :                 log_debug("Weird, the watch descriptor we already knew for this inode changed?");
    1579           0 :                 (void) inotify_rm_watch(d->inotify_data->fd, wd);
    1580           0 :                 return -EINVAL;
    1581             :         }
    1582             : 
    1583          10 :         d->combined_mask = combined_mask;
    1584          10 :         return 1;
    1585             : }
    1586             : 
    1587           8 : _public_ int sd_event_add_inotify(
    1588             :                 sd_event *e,
    1589             :                 sd_event_source **ret,
    1590             :                 const char *path,
    1591             :                 uint32_t mask,
    1592             :                 sd_event_inotify_handler_t callback,
    1593             :                 void *userdata) {
    1594             : 
    1595           8 :         struct inotify_data *inotify_data = NULL;
    1596           8 :         struct inode_data *inode_data = NULL;
    1597           8 :         _cleanup_close_ int fd = -1;
    1598           8 :         _cleanup_(source_freep) sd_event_source *s = NULL;
    1599             :         struct stat st;
    1600             :         int r;
    1601             : 
    1602           8 :         assert_return(e, -EINVAL);
    1603           8 :         assert_return(e = event_resolve(e), -ENOPKG);
    1604           8 :         assert_return(path, -EINVAL);
    1605           8 :         assert_return(callback, -EINVAL);
    1606           8 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    1607           8 :         assert_return(!event_pid_changed(e), -ECHILD);
    1608             : 
    1609             :         /* Refuse IN_MASK_ADD since we coalesce watches on the same inode and hence really don't want to merge
    1610             :          * masks. Or in other words, this whole code exists precisely to manage IN_MASK_ADD-type operations for the
    1611             :          * caller, hence the caller cannot pass that flag in directly. */
    1612           8 :         if (mask & IN_MASK_ADD)
    1613           0 :                 return -EINVAL;
    1614             : 
    1615          24 :         fd = open(path, O_PATH|O_CLOEXEC|
    1616           8 :                   (mask & IN_ONLYDIR ? O_DIRECTORY : 0)|
    1617           8 :                   (mask & IN_DONT_FOLLOW ? O_NOFOLLOW : 0));
    1618           8 :         if (fd < 0)
    1619           0 :                 return -errno;
    1620             : 
    1621           8 :         if (fstat(fd, &st) < 0)
    1622           0 :                 return -errno;
    1623             : 
    1624           8 :         s = source_new(e, !ret, SOURCE_INOTIFY);
    1625           8 :         if (!s)
    1626           0 :                 return -ENOMEM;
    1627             : 
    1628           8 :         s->enabled = mask & IN_ONESHOT ? SD_EVENT_ONESHOT : SD_EVENT_ON;
    1629           8 :         s->inotify.mask = mask;
    1630           8 :         s->inotify.callback = callback;
    1631           8 :         s->userdata = userdata;
    1632             : 
    1633             :         /* Allocate an inotify object for this priority, and an inode object within it */
    1634           8 :         r = event_make_inotify_data(e, SD_EVENT_PRIORITY_NORMAL, &inotify_data);
    1635           8 :         if (r < 0)
    1636           0 :                 return r;
    1637             : 
    1638           8 :         r = event_make_inode_data(e, inotify_data, st.st_dev, st.st_ino, &inode_data);
    1639           8 :         if (r < 0) {
    1640           0 :                 event_free_inotify_data(e, inotify_data);
    1641           0 :                 return r;
    1642             :         }
    1643             : 
    1644             :         /* Keep the O_PATH fd around until the first iteration of the loop, so that the priority of the event source
    1645             :          * can still be changed until then, which requires access to the original inode. */
    1646           8 :         if (inode_data->fd < 0) {
    1647           4 :                 inode_data->fd = TAKE_FD(fd);
    1648           4 :                 LIST_PREPEND(to_close, e->inode_data_to_close, inode_data);
    1649             :         }
    1650             : 
    1651             :         /* Link our event source to the inode data object */
    1652           8 :         LIST_PREPEND(inotify.by_inode_data, inode_data->event_sources, s);
    1653           8 :         s->inotify.inode_data = inode_data;
    1654             : 
    1655             :         /* Actually realize the watch now */
    1656           8 :         r = inode_data_realize_watch(e, inode_data);
    1657           8 :         if (r < 0)
    1658           0 :                 return r;
    1659             : 
    1660           8 :         (void) sd_event_source_set_description(s, path);
    1661             : 
    1662           8 :         if (ret)
    1663           8 :                 *ret = s;
    1664           8 :         TAKE_PTR(s);
    1665             : 
    1666           8 :         return 0;
    1667             : }
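                     :
                     :         /* Editor's note: an illustrative sketch, not part of sd-event.c and not compiled in. It watches a
                     :          * directory for new entries; IN_ONLYDIR is translated into O_DIRECTORY for the O_PATH open() above, and
                     :          * IN_MASK_ADD is refused because watches on the same inode are coalesced here. */
                     : #if 0
                     : #include <sys/inotify.h>
                     : #include <systemd/sd-event.h>
                     :
                     : static int on_dir_event(sd_event_source *s, const struct inotify_event *ev, void *userdata) {
                     :         /* ev->mask and, if non-empty, ev->name describe what happened inside the directory */
                     :         return 0;
                     : }
                     :
                     : static int watch_directory(sd_event *e, const char *path) {
                     :         return sd_event_add_inotify(e, NULL, path,
                     :                                     IN_CREATE|IN_MOVED_TO|IN_ONLYDIR,
                     :                                     on_dir_event, NULL);
                     : }
                     : #endif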
    1668             : 
    1669        2138 : static sd_event_source* event_source_free(sd_event_source *s) {
    1670        2138 :         if (!s)
    1671           0 :                 return NULL;
    1672             : 
    1673             :         /* Here's a special hack: when we are called from a
    1674             :          * dispatch handler we won't free the event source
    1675             :          * immediately, but we will detach the fd from the
    1676             :          * epoll. This way it is safe for the caller to unref
    1677             :          * the event source and immediately close the fd, but
    1678             :          * we still retain a valid event source object after
    1679             :          * the callback. */
    1680             : 
    1681        2138 :         if (s->dispatching) {
    1682         484 :                 if (s->type == SOURCE_IO)
    1683           9 :                         source_io_unregister(s);
    1684             : 
    1685         484 :                 source_disconnect(s);
    1686             :         } else
    1687        1654 :                 source_free(s);
    1688             : 
    1689        2138 :         return NULL;
    1690             : }
    1691             : 
    1692        5406 : DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(sd_event_source, sd_event_source, event_source_free);
    1693             : 
    1694        2167 : _public_ int sd_event_source_set_description(sd_event_source *s, const char *description) {
    1695        2167 :         assert_return(s, -EINVAL);
    1696        2167 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1697             : 
    1698        2167 :         return free_and_strdup(&s->description, description);
    1699             : }
    1700             : 
    1701       49459 : _public_ int sd_event_source_get_description(sd_event_source *s, const char **description) {
    1702       49459 :         assert_return(s, -EINVAL);
    1703       49459 :         assert_return(description, -EINVAL);
    1704       49459 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1705             : 
    1706       49459 :         if (!s->description)
    1707           0 :                 return -ENXIO;
    1708             : 
    1709       49459 :         *description = s->description;
    1710       49459 :         return 0;
    1711             : }
    1712             : 
    1713           6 : _public_ sd_event *sd_event_source_get_event(sd_event_source *s) {
    1714           6 :         assert_return(s, NULL);
    1715             : 
    1716           6 :         return s->event;
    1717             : }
    1718             : 
    1719           0 : _public_ int sd_event_source_get_pending(sd_event_source *s) {
    1720           0 :         assert_return(s, -EINVAL);
    1721           0 :         assert_return(s->type != SOURCE_EXIT, -EDOM);
    1722           0 :         assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
    1723           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1724             : 
    1725           0 :         return s->pending;
    1726             : }
    1727             : 
    1728           0 : _public_ int sd_event_source_get_io_fd(sd_event_source *s) {
    1729           0 :         assert_return(s, -EINVAL);
    1730           0 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1731           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1732             : 
    1733           0 :         return s->io.fd;
    1734             : }
    1735             : 
    1736           6 : _public_ int sd_event_source_set_io_fd(sd_event_source *s, int fd) {
    1737             :         int r;
    1738             : 
    1739           6 :         assert_return(s, -EINVAL);
    1740           6 :         assert_return(fd >= 0, -EBADF);
    1741           6 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1742           6 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1743             : 
    1744           6 :         if (s->io.fd == fd)
    1745           6 :                 return 0;
    1746             : 
    1747           0 :         if (s->enabled == SD_EVENT_OFF) {
    1748           0 :                 s->io.fd = fd;
    1749           0 :                 s->io.registered = false;
    1750             :         } else {
    1751             :                 int saved_fd;
    1752             : 
    1753           0 :                 saved_fd = s->io.fd;
    1754           0 :                 assert(s->io.registered);
    1755             : 
    1756           0 :                 s->io.fd = fd;
    1757           0 :                 s->io.registered = false;
    1758             : 
    1759           0 :                 r = source_io_register(s, s->enabled, s->io.events);
    1760           0 :                 if (r < 0) {
    1761           0 :                         s->io.fd = saved_fd;
    1762           0 :                         s->io.registered = true;
    1763           0 :                         return r;
    1764             :                 }
    1765             : 
    1766           0 :                 epoll_ctl(s->event->epoll_fd, EPOLL_CTL_DEL, saved_fd, NULL);
    1767             :         }
    1768             : 
    1769           0 :         return 0;
    1770             : }
    1771             : 
    1772           0 : _public_ int sd_event_source_get_io_fd_own(sd_event_source *s) {
    1773           0 :         assert_return(s, -EINVAL);
    1774           0 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1775             : 
    1776           0 :         return s->io.owned;
    1777             : }
    1778             : 
    1779           0 : _public_ int sd_event_source_set_io_fd_own(sd_event_source *s, int own) {
    1780           0 :         assert_return(s, -EINVAL);
    1781           0 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1782             : 
    1783           0 :         s->io.owned = own;
    1784           0 :         return 0;
    1785             : }
    1786             : 
    1787           0 : _public_ int sd_event_source_get_io_events(sd_event_source *s, uint32_t* events) {
    1788           0 :         assert_return(s, -EINVAL);
    1789           0 :         assert_return(events, -EINVAL);
    1790           0 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1791           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1792             : 
    1793           0 :         *events = s->io.events;
    1794           0 :         return 0;
    1795             : }
    1796             : 
    1797      368058 : _public_ int sd_event_source_set_io_events(sd_event_source *s, uint32_t events) {
    1798             :         int r;
    1799             : 
    1800      368058 :         assert_return(s, -EINVAL);
    1801      368058 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1802      368058 :         assert_return(!(events & ~(EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLPRI|EPOLLERR|EPOLLHUP|EPOLLET)), -EINVAL);
    1803      368058 :         assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
    1804      368058 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1805             : 
    1806             :         /* edge-triggered updates are never skipped, so we can reset edges */
    1807      368058 :         if (s->io.events == events && !(events & EPOLLET))
    1808      366828 :                 return 0;
    1809             : 
    1810        1230 :         r = source_set_pending(s, false);
    1811        1230 :         if (r < 0)
    1812           0 :                 return r;
    1813             : 
    1814        1230 :         if (s->enabled != SD_EVENT_OFF) {
    1815        1230 :                 r = source_io_register(s, s->enabled, events);
    1816        1230 :                 if (r < 0)
    1817           0 :                         return r;
    1818             :         }
    1819             : 
    1820        1230 :         s->io.events = events;
    1821             : 
    1822        1230 :         return 0;
    1823             : }
    1824             : 
    1825           0 : _public_ int sd_event_source_get_io_revents(sd_event_source *s, uint32_t* revents) {
    1826           0 :         assert_return(s, -EINVAL);
    1827           0 :         assert_return(revents, -EINVAL);
    1828           0 :         assert_return(s->type == SOURCE_IO, -EDOM);
    1829           0 :         assert_return(s->pending, -ENODATA);
    1830           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1831             : 
    1832           0 :         *revents = s->io.revents;
    1833           0 :         return 0;
    1834             : }
    1835             : 
    1836           0 : _public_ int sd_event_source_get_signal(sd_event_source *s) {
    1837           0 :         assert_return(s, -EINVAL);
    1838           0 :         assert_return(s->type == SOURCE_SIGNAL, -EDOM);
    1839           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1840             : 
    1841           0 :         return s->signal.sig;
    1842             : }
    1843             : 
    1844           1 : _public_ int sd_event_source_get_priority(sd_event_source *s, int64_t *priority) {
    1845           1 :         assert_return(s, -EINVAL);
    1846           1 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1847             : 
    1848           1 :         *priority = s->priority;
    1849           1 :         return 0;
    1850             : }
    1851             : 
    1852        2098 : _public_ int sd_event_source_set_priority(sd_event_source *s, int64_t priority) {
    1853        2098 :         bool rm_inotify = false, rm_inode = false;
    1854        2098 :         struct inotify_data *new_inotify_data = NULL;
    1855        2098 :         struct inode_data *new_inode_data = NULL;
    1856             :         int r;
    1857             : 
    1858        2098 :         assert_return(s, -EINVAL);
    1859        2098 :         assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
    1860        2098 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1861             : 
    1862        2098 :         if (s->priority == priority)
    1863         650 :                 return 0;
    1864             : 
    1865        1448 :         if (s->type == SOURCE_INOTIFY) {
    1866             :                 struct inode_data *old_inode_data;
    1867             : 
    1868           6 :                 assert(s->inotify.inode_data);
    1869           6 :                 old_inode_data = s->inotify.inode_data;
    1870             : 
    1871             :                 /* We need the original fd to change the priority. If we don't have it, we can't change the priority
    1872             :                  * anymore. Note that we close any such fds when entering the next event loop iteration, i.e. for
    1873             :                  * inotify events we allow priority changes only until the first following iteration. */
    1874           6 :                 if (old_inode_data->fd < 0)
    1875           0 :                         return -EOPNOTSUPP;
    1876             : 
    1877           6 :                 r = event_make_inotify_data(s->event, priority, &new_inotify_data);
    1878           6 :                 if (r < 0)
    1879           0 :                         return r;
    1880           6 :                 rm_inotify = r > 0;
    1881             : 
    1882           6 :                 r = event_make_inode_data(s->event, new_inotify_data, old_inode_data->dev, old_inode_data->ino, &new_inode_data);
    1883           6 :                 if (r < 0)
    1884           0 :                         goto fail;
    1885           6 :                 rm_inode = r > 0;
    1886             : 
    1887           6 :                 if (new_inode_data->fd < 0) {
    1888             :                         /* Duplicate the fd for the new inode object if we don't have any yet */
    1889           4 :                         new_inode_data->fd = fcntl(old_inode_data->fd, F_DUPFD_CLOEXEC, 3);
    1890           4 :                         if (new_inode_data->fd < 0) {
    1891           0 :                                 r = -errno;
    1892           0 :                                 goto fail;
    1893             :                         }
    1894             : 
    1895           4 :                         LIST_PREPEND(to_close, s->event->inode_data_to_close, new_inode_data);
    1896             :                 }
    1897             : 
    1898             :                 /* Move the event source to the new inode data structure */
    1899           6 :                 LIST_REMOVE(inotify.by_inode_data, old_inode_data->event_sources, s);
    1900           6 :                 LIST_PREPEND(inotify.by_inode_data, new_inode_data->event_sources, s);
    1901           6 :                 s->inotify.inode_data = new_inode_data;
    1902             : 
    1903             :                 /* Now create the new watch */
    1904           6 :                 r = inode_data_realize_watch(s->event, new_inode_data);
    1905           6 :                 if (r < 0) {
    1906             :                         /* Move it back */
    1907           0 :                         LIST_REMOVE(inotify.by_inode_data, new_inode_data->event_sources, s);
    1908           0 :                         LIST_PREPEND(inotify.by_inode_data, old_inode_data->event_sources, s);
    1909           0 :                         s->inotify.inode_data = old_inode_data;
    1910           0 :                         goto fail;
    1911             :                 }
    1912             : 
    1913           6 :                 s->priority = priority;
    1914             : 
    1915           6 :                 event_gc_inode_data(s->event, old_inode_data);
    1916             : 
    1917        1443 :         } else if (s->type == SOURCE_SIGNAL && s->enabled != SD_EVENT_OFF) {
    1918             :                 struct signal_data *old, *d;
    1919             : 
    1920             :                 /* Move us from the signalfd belonging to the old
    1921             :                  * priority to the signalfd of the new priority */
    1922             : 
    1923           1 :                 assert_se(old = hashmap_get(s->event->signal_data, &s->priority));
    1924             : 
    1925           1 :                 s->priority = priority;
    1926             : 
    1927           1 :                 r = event_make_signal_data(s->event, s->signal.sig, &d);
    1928           1 :                 if (r < 0) {
    1929           0 :                         s->priority = old->priority;
    1930           0 :                         return r;
    1931             :                 }
    1932             : 
    1933           1 :                 event_unmask_signal_data(s->event, old, s->signal.sig);
    1934             :         } else
    1935        1441 :                 s->priority = priority;
    1936             : 
    1937        1448 :         if (s->pending)
    1938         382 :                 prioq_reshuffle(s->event->pending, s, &s->pending_index);
    1939             : 
    1940        1448 :         if (s->prepare)
    1941         333 :                 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
    1942             : 
    1943        1448 :         if (s->type == SOURCE_EXIT)
    1944         333 :                 prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
    1945             : 
    1946        1448 :         return 0;
    1947             : 
    1948           0 : fail:
    1949           0 :         if (rm_inode)
    1950           0 :                 event_free_inode_data(s->event, new_inode_data);
    1951             : 
    1952           0 :         if (rm_inotify)
    1953           0 :                 event_free_inotify_data(s->event, new_inotify_data);
    1954             : 
    1955           0 :         return r;
    1956             : }
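                     :
                     :         /* Editor's note: an illustrative sketch, not part of sd-event.c and not compiled in. Lower priority values
                     :          * are dispatched earlier (SD_EVENT_PRIORITY_IMPORTANT < SD_EVENT_PRIORITY_NORMAL < SD_EVENT_PRIORITY_IDLE).
                     :          * As the -EOPNOTSUPP branch above shows, inotify sources can only be re-prioritized until the first loop
                     :          * iteration, while their original O_PATH fd is still open. */
                     : #if 0
                     : #include <systemd/sd-event.h>
                     :
                     : static int prioritize(sd_event_source *s) {
                     :         /* dispatch this source ahead of sources left at the default priority */
                     :         return sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IMPORTANT);
                     : }
                     : #endif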
    1957             : 
    1958           0 : _public_ int sd_event_source_get_enabled(sd_event_source *s, int *m) {
    1959           0 :         assert_return(s, -EINVAL);
    1960           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1961             : 
    1962           0 :         if (m)
    1963           0 :                 *m = s->enabled;
    1964           0 :         return s->enabled != SD_EVENT_OFF;
    1965             : }
    1966             : 
    1967      372438 : _public_ int sd_event_source_set_enabled(sd_event_source *s, int m) {
    1968             :         int r;
    1969             : 
    1970      372438 :         assert_return(s, -EINVAL);
    1971      372438 :         assert_return(IN_SET(m, SD_EVENT_OFF, SD_EVENT_ON, SD_EVENT_ONESHOT), -EINVAL);
    1972      372438 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    1973             : 
    1974             :         /* If we are dead anyway, we are fine with turning off
    1975             :          * sources, but everything else needs to fail. */
    1976      372438 :         if (s->event->state == SD_EVENT_FINISHED)
    1977           2 :                 return m == SD_EVENT_OFF ? 0 : -ESTALE;
    1978             : 
    1979      372436 :         if (s->enabled == m)
    1980      369157 :                 return 0;
    1981             : 
    1982        3279 :         if (m == SD_EVENT_OFF) {
    1983             : 
    1984             :                 /* Unset the pending flag when this event source is disabled */
    1985        2621 :                 if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
    1986        1027 :                         r = source_set_pending(s, false);
    1987        1027 :                         if (r < 0)
    1988           0 :                                 return r;
    1989             :                 }
    1990             : 
    1991        2621 :                 switch (s->type) {
    1992             : 
    1993         493 :                 case SOURCE_IO:
    1994         493 :                         source_io_unregister(s);
    1995         493 :                         s->enabled = m;
    1996         493 :                         break;
    1997             : 
    1998         532 :                 case SOURCE_TIME_REALTIME:
    1999             :                 case SOURCE_TIME_BOOTTIME:
    2000             :                 case SOURCE_TIME_MONOTONIC:
    2001             :                 case SOURCE_TIME_REALTIME_ALARM:
    2002             :                 case SOURCE_TIME_BOOTTIME_ALARM: {
    2003             :                         struct clock_data *d;
    2004             : 
    2005         532 :                         s->enabled = m;
    2006         532 :                         d = event_get_clock_data(s->event, s->type);
    2007         532 :                         assert(d);
    2008             : 
    2009         532 :                         prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
    2010         532 :                         prioq_reshuffle(d->latest, s, &s->time.latest_index);
    2011         532 :                         d->needs_rearm = true;
    2012         532 :                         break;
    2013             :                 }
    2014             : 
    2015           1 :                 case SOURCE_SIGNAL:
    2016           1 :                         s->enabled = m;
    2017             : 
    2018           1 :                         event_gc_signal_data(s->event, &s->priority, s->signal.sig);
    2019           1 :                         break;
    2020             : 
    2021           1 :                 case SOURCE_CHILD:
    2022           1 :                         s->enabled = m;
    2023             : 
    2024           1 :                         assert(s->event->n_enabled_child_sources > 0);
    2025           1 :                         s->event->n_enabled_child_sources--;
    2026             : 
    2027           1 :                         event_gc_signal_data(s->event, &s->priority, SIGCHLD);
    2028           1 :                         break;
    2029             : 
    2030         486 :                 case SOURCE_EXIT:
    2031         486 :                         s->enabled = m;
    2032         486 :                         prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
    2033         486 :                         break;
    2034             : 
    2035        1108 :                 case SOURCE_DEFER:
    2036             :                 case SOURCE_POST:
    2037             :                 case SOURCE_INOTIFY:
    2038        1108 :                         s->enabled = m;
    2039        1108 :                         break;
    2040             : 
    2041           0 :                 default:
    2042           0 :                         assert_not_reached("Wut? I shouldn't exist.");
    2043             :                 }
    2044             : 
    2045             :         } else {
    2046             : 
    2047             :                 /* Unset the pending flag when this event source is enabled */
    2048         658 :                 if (s->enabled == SD_EVENT_OFF && !IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
    2049          39 :                         r = source_set_pending(s, false);
    2050          39 :                         if (r < 0)
    2051           0 :                                 return r;
    2052             :                 }
    2053             : 
    2054         658 :                 switch (s->type) {
    2055             : 
    2056           3 :                 case SOURCE_IO:
    2057           3 :                         r = source_io_register(s, m, s->io.events);
    2058           3 :                         if (r < 0)
    2059           0 :                                 return r;
    2060             : 
    2061           3 :                         s->enabled = m;
    2062           3 :                         break;
    2063             : 
    2064          49 :                 case SOURCE_TIME_REALTIME:
    2065             :                 case SOURCE_TIME_BOOTTIME:
    2066             :                 case SOURCE_TIME_MONOTONIC:
    2067             :                 case SOURCE_TIME_REALTIME_ALARM:
    2068             :                 case SOURCE_TIME_BOOTTIME_ALARM: {
    2069             :                         struct clock_data *d;
    2070             : 
    2071          49 :                         s->enabled = m;
    2072          49 :                         d = event_get_clock_data(s->event, s->type);
    2073          49 :                         assert(d);
    2074             : 
    2075          49 :                         prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
    2076          49 :                         prioq_reshuffle(d->latest, s, &s->time.latest_index);
    2077          49 :                         d->needs_rearm = true;
    2078          49 :                         break;
    2079             :                 }
    2080             : 
    2081           1 :                 case SOURCE_SIGNAL:
    2082             : 
    2083           1 :                         s->enabled = m;
    2084             : 
    2085           1 :                         r = event_make_signal_data(s->event, s->signal.sig, NULL);
    2086           1 :                         if (r < 0) {
    2087           0 :                                 s->enabled = SD_EVENT_OFF;
    2088           0 :                                 event_gc_signal_data(s->event, &s->priority, s->signal.sig);
    2089           0 :                                 return r;
    2090             :                         }
    2091             : 
    2092           1 :                         break;
    2093             : 
    2094           0 :                 case SOURCE_CHILD:
    2095             : 
    2096           0 :                         if (s->enabled == SD_EVENT_OFF)
    2097           0 :                                 s->event->n_enabled_child_sources++;
    2098             : 
    2099           0 :                         s->enabled = m;
    2100             : 
    2101           0 :                         r = event_make_signal_data(s->event, SIGCHLD, NULL);
    2102           0 :                         if (r < 0) {
    2103           0 :                                 s->enabled = SD_EVENT_OFF;
    2104           0 :                                 s->event->n_enabled_child_sources--;
    2105           0 :                                 event_gc_signal_data(s->event, &s->priority, SIGCHLD);
    2106           0 :                                 return r;
    2107             :                         }
    2108             : 
    2109           0 :                         break;
    2110             : 
    2111           0 :                 case SOURCE_EXIT:
    2112           0 :                         s->enabled = m;
    2113           0 :                         prioq_reshuffle(s->event->exit, s, &s->exit.prioq_index);
    2114           0 :                         break;
    2115             : 
    2116         605 :                 case SOURCE_DEFER:
    2117             :                 case SOURCE_POST:
    2118             :                 case SOURCE_INOTIFY:
    2119         605 :                         s->enabled = m;
    2120         605 :                         break;
    2121             : 
    2122           0 :                 default:
    2123           0 :                         assert_not_reached("Wut? I shouldn't exist.");
    2124             :                 }
    2125             :         }
    2126             : 
    2127        3279 :         if (s->pending)
    2128        1713 :                 prioq_reshuffle(s->event->pending, s, &s->pending_index);
    2129             : 
    2130        3279 :         if (s->prepare)
    2131         489 :                 prioq_reshuffle(s->event->prepare, s, &s->prepare_index);
    2132             : 
    2133        3279 :         return 0;
    2134             : }
    2135             : 
    2136           0 : _public_ int sd_event_source_get_time(sd_event_source *s, uint64_t *usec) {
    2137           0 :         assert_return(s, -EINVAL);
    2138           0 :         assert_return(usec, -EINVAL);
    2139           0 :         assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
    2140           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2141             : 
    2142           0 :         *usec = s->time.next;
    2143           0 :         return 0;
    2144             : }
    2145             : 
    2146        1194 : _public_ int sd_event_source_set_time(sd_event_source *s, uint64_t usec) {
    2147             :         struct clock_data *d;
    2148             :         int r;
    2149             : 
    2150        1194 :         assert_return(s, -EINVAL);
    2151        1194 :         assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
    2152        1194 :         assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
    2153        1194 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2154             : 
    2155        1194 :         r = source_set_pending(s, false);
    2156        1194 :         if (r < 0)
    2157           0 :                 return r;
    2158             : 
    2159        1194 :         s->time.next = usec;
    2160             : 
    2161        1194 :         d = event_get_clock_data(s->event, s->type);
    2162        1194 :         assert(d);
    2163             : 
    2164        1194 :         prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
    2165        1194 :         prioq_reshuffle(d->latest, s, &s->time.latest_index);
    2166        1194 :         d->needs_rearm = true;
    2167             : 
    2168        1194 :         return 0;
    2169             : }
    2170             : 
    2171           0 : _public_ int sd_event_source_get_time_accuracy(sd_event_source *s, uint64_t *usec) {
    2172           0 :         assert_return(s, -EINVAL);
    2173           0 :         assert_return(usec, -EINVAL);
    2174           0 :         assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
    2175           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2176             : 
    2177           0 :         *usec = s->time.accuracy;
    2178           0 :         return 0;
    2179             : }
    2180             : 
    2181          43 : _public_ int sd_event_source_set_time_accuracy(sd_event_source *s, uint64_t usec) {
    2182             :         struct clock_data *d;
    2183             :         int r;
    2184             : 
    2185          43 :         assert_return(s, -EINVAL);
    2186          43 :         assert_return(usec != (uint64_t) -1, -EINVAL);
    2187          43 :         assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
    2188          43 :         assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
    2189          43 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2190             : 
    2191          43 :         r = source_set_pending(s, false);
    2192          43 :         if (r < 0)
    2193           0 :                 return r;
    2194             : 
    2195          43 :         if (usec == 0)
    2196          15 :                 usec = DEFAULT_ACCURACY_USEC;
    2197             : 
    2198          43 :         s->time.accuracy = usec;
    2199             : 
    2200          43 :         d = event_get_clock_data(s->event, s->type);
    2201          43 :         assert(d);
    2202             : 
    2203          43 :         prioq_reshuffle(d->latest, s, &s->time.latest_index);
    2204          43 :         d->needs_rearm = true;
    2205             : 
    2206          43 :         return 0;
    2207             : }
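
The two setters above are the re-arming half of a periodic timer: the callback advances the expiry with sd_event_source_set_time() and re-enables the (by default one-shot) source, while the accuracy, whether passed to sd_event_add_time() or changed later with the setter above, defines how much slack sleep_between() may use to coalesce the wakeup with other timers. A minimal caller-side sketch, assuming only the public libsystemd API in <systemd/sd-event.h> (error handling omitted):

#include <time.h>
#include <systemd/sd-event.h>

#define TICK_USEC     (1000 * 1000ULL)   /* 1 s period */
#define ACCURACY_USEC (100 * 1000ULL)    /* allow 100 ms of coalescing slack */

static int on_tick(sd_event_source *s, uint64_t usec, void *userdata) {
        /* Time sources are one-shot by default: move the expiry forward and re-enable. */
        sd_event_source_set_time(s, usec + TICK_USEC);
        sd_event_source_set_enabled(s, SD_EVENT_ONESHOT);
        return 0;
}

int main(void) {
        sd_event *e = NULL;
        sd_event_source *s = NULL;
        uint64_t now;

        sd_event_default(&e);
        sd_event_now(e, CLOCK_MONOTONIC, &now);
        sd_event_add_time(e, &s, CLOCK_MONOTONIC, now + TICK_USEC, ACCURACY_USEC, on_tick, NULL);

        sd_event_loop(e);          /* runs until sd_event_exit() is called elsewhere */

        sd_event_source_unref(s);
        sd_event_unref(e);
        return 0;
}
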
    2208             : 
    2209          43 : _public_ int sd_event_source_get_time_clock(sd_event_source *s, clockid_t *clock) {
    2210          43 :         assert_return(s, -EINVAL);
    2211          43 :         assert_return(clock, -EINVAL);
    2212          43 :         assert_return(EVENT_SOURCE_IS_TIME(s->type), -EDOM);
    2213          43 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2214             : 
    2215          43 :         *clock = event_source_type_to_clock(s->type);
    2216          43 :         return 0;
    2217             : }
    2218             : 
    2219           0 : _public_ int sd_event_source_get_child_pid(sd_event_source *s, pid_t *pid) {
    2220           0 :         assert_return(s, -EINVAL);
    2221           0 :         assert_return(pid, -EINVAL);
    2222           0 :         assert_return(s->type == SOURCE_CHILD, -EDOM);
    2223           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2224             : 
    2225           0 :         *pid = s->child.pid;
    2226           0 :         return 0;
    2227             : }
    2228             : 
    2229           0 : _public_ int sd_event_source_get_inotify_mask(sd_event_source *s, uint32_t *mask) {
    2230           0 :         assert_return(s, -EINVAL);
    2231           0 :         assert_return(mask, -EINVAL);
    2232           0 :         assert_return(s->type == SOURCE_INOTIFY, -EDOM);
    2233           0 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2234             : 
    2235           0 :         *mask = s->inotify.mask;
    2236           0 :         return 0;
    2237             : }
    2238             : 
    2239         490 : _public_ int sd_event_source_set_prepare(sd_event_source *s, sd_event_handler_t callback) {
    2240             :         int r;
    2241             : 
    2242         490 :         assert_return(s, -EINVAL);
    2243         490 :         assert_return(s->type != SOURCE_EXIT, -EDOM);
    2244         490 :         assert_return(s->event->state != SD_EVENT_FINISHED, -ESTALE);
    2245         490 :         assert_return(!event_pid_changed(s->event), -ECHILD);
    2246             : 
    2247         490 :         if (s->prepare == callback)
    2248           0 :                 return 0;
    2249             : 
    2250         490 :         if (callback && s->prepare) {
    2251           0 :                 s->prepare = callback;
    2252           0 :                 return 0;
    2253             :         }
    2254             : 
    2255         490 :         r = prioq_ensure_allocated(&s->event->prepare, prepare_prioq_compare);
    2256         490 :         if (r < 0)
    2257           0 :                 return r;
    2258             : 
    2259         490 :         s->prepare = callback;
    2260             : 
    2261         490 :         if (callback) {
    2262         490 :                 r = prioq_put(s->event->prepare, s, &s->prepare_index);
    2263         490 :                 if (r < 0)
    2264           0 :                         return r;
    2265             :         } else
    2266           0 :                 prioq_remove(s->event->prepare, s, &s->prepare_index);
    2267             : 
    2268         490 :         return 0;
    2269             : }
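
Prepare callbacks registered here are invoked from event_prepare() at the start of every iteration, before the loop polls, which makes them a natural place to (re)compute state that is not visible through a file descriptor. A minimal sketch, assuming only the public API: a timer source that stays enabled and whose prepare callback pushes the application's next deadline into the source before each sleep (next_deadline is a hypothetical piece of application state):

#include <stdint.h>
#include <time.h>
#include <systemd/sd-event.h>

/* Hypothetical application state: absolute CLOCK_MONOTONIC deadline, or UINT64_MAX for "none". */
static uint64_t next_deadline = UINT64_MAX;

static int on_deadline(sd_event_source *s, uint64_t usec, void *userdata) {
        /* ... act on the expired deadline ... */
        next_deadline = UINT64_MAX;              /* nothing scheduled anymore */
        return 0;
}

static int on_prepare(sd_event_source *s, void *userdata) {
        /* Called before every poll: make the timer reflect whatever the application scheduled. */
        return sd_event_source_set_time(s, next_deadline);
}

static int setup_deadline_source(sd_event *e, sd_event_source **ret) {
        int r;

        r = sd_event_add_time(e, ret, CLOCK_MONOTONIC, UINT64_MAX, 0, on_deadline, NULL);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(*ret, SD_EVENT_ON);   /* default is one-shot */
        if (r < 0)
                return r;

        return sd_event_source_set_prepare(*ret, on_prepare);
}
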
    2270             : 
    2271           0 : _public_ void* sd_event_source_get_userdata(sd_event_source *s) {
    2272           0 :         assert_return(s, NULL);
    2273             : 
    2274           0 :         return s->userdata;
    2275             : }
    2276             : 
    2277          43 : _public_ void *sd_event_source_set_userdata(sd_event_source *s, void *userdata) {
    2278             :         void *ret;
    2279             : 
    2280          43 :         assert_return(s, NULL);
    2281             : 
    2282          43 :         ret = s->userdata;
    2283          43 :         s->userdata = userdata;
    2284             : 
    2285          43 :         return ret;
    2286             : }
    2287             : 
    2288          87 : static usec_t sleep_between(sd_event *e, usec_t a, usec_t b) {
    2289             :         usec_t c;
    2290          87 :         assert(e);
    2291          87 :         assert(a <= b);
    2292             : 
    2293          87 :         if (a <= 0)
    2294          35 :                 return 0;
    2295          52 :         if (a >= USEC_INFINITY)
    2296           0 :                 return USEC_INFINITY;
    2297             : 
    2298          52 :         if (b <= a + 1)
    2299           0 :                 return a;
    2300             : 
    2301          52 :         initialize_perturb(e);
    2302             : 
    2303             :         /*
    2304             :           Find a good time to wake up again between times a and b. We
    2305             :           have two goals here:
    2306             : 
    2307             :           a) We want to wake up as seldom as possible, hence prefer
    2308             :              later times over earlier times.
    2309             : 
    2310             :           b) But if we have to wake up, then let's make sure to
    2311             :              dispatch as much as possible on the entire system.
    2312             : 
    2313             :           We implement this by waking up everywhere at the same time
    2314             :           within any given minute if we can, synchronised via the
    2315             :           perturbation value determined from the boot ID. If we can't,
    2316             :           perturbation value determined from the boot ID. If we can't,
    2317             :           then we try to find the same spot within every 10s, then
    2318             :           every 1s and then every 250ms interval. Otherwise, we pick
    2319             :           the last possible time to wake up.
    2320             : 
    2321          52 :         c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + e->perturb;
    2322          52 :         if (c >= b) {
    2323          52 :                 if (_unlikely_(c < USEC_PER_MINUTE))
    2324           0 :                         return b;
    2325             : 
    2326          52 :                 c -= USEC_PER_MINUTE;
    2327             :         }
    2328             : 
    2329          52 :         if (c >= a)
    2330           0 :                 return c;
    2331             : 
    2332          52 :         c = (b / (USEC_PER_SEC*10)) * (USEC_PER_SEC*10) + (e->perturb % (USEC_PER_SEC*10));
    2333          52 :         if (c >= b) {
    2334          41 :                 if (_unlikely_(c < USEC_PER_SEC*10))
    2335           0 :                         return b;
    2336             : 
    2337          41 :                 c -= USEC_PER_SEC*10;
    2338             :         }
    2339             : 
    2340          52 :         if (c >= a)
    2341           0 :                 return c;
    2342             : 
    2343          52 :         c = (b / USEC_PER_SEC) * USEC_PER_SEC + (e->perturb % USEC_PER_SEC);
    2344          52 :         if (c >= b) {
    2345          13 :                 if (_unlikely_(c < USEC_PER_SEC))
    2346           0 :                         return b;
    2347             : 
    2348          13 :                 c -= USEC_PER_SEC;
    2349             :         }
    2350             : 
    2351          52 :         if (c >= a)
    2352           2 :                 return c;
    2353             : 
    2354          50 :         c = (b / (USEC_PER_MSEC*250)) * (USEC_PER_MSEC*250) + (e->perturb % (USEC_PER_MSEC*250));
    2355          50 :         if (c >= b) {
    2356          23 :                 if (_unlikely_(c < USEC_PER_MSEC*250))
    2357           0 :                         return b;
    2358             : 
    2359          23 :                 c -= USEC_PER_MSEC*250;
    2360             :         }
    2361             : 
    2362          50 :         if (c >= a)
    2363          47 :                 return c;
    2364             : 
    2365           3 :         return b;
    2366             : }
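
The comment above describes the policy; every granularity below it is the same arithmetic step: round b down to the granularity boundary, add the per-boot perturbation, step back one period if that overshoots b, and accept the result if it is still no earlier than a. A self-contained illustration of a single step at minute granularity (not the original helper, just the arithmetic):

#include <inttypes.h>
#include <stdio.h>

#define USEC_PER_SEC    UINT64_C(1000000)
#define USEC_PER_MINUTE (60 * USEC_PER_SEC)

/* One coalescing step: pick the per-boot "spot" inside the minute that contains b,
 * stepping back a whole minute if that spot lies past b. Falls back to b when the
 * spot would be earlier than a (the real code then retries with finer granularities). */
static uint64_t coalesce_minute(uint64_t a, uint64_t b, uint64_t perturb) {
        uint64_t c = (b / USEC_PER_MINUTE) * USEC_PER_MINUTE + perturb;

        if (c >= b) {
                if (c < USEC_PER_MINUTE)        /* avoid underflow near the epoch */
                        return b;
                c -= USEC_PER_MINUTE;
        }

        return c >= a ? c : b;
}

int main(void) {
        /* Earliest 12.3 s, latest 95 s, perturbation 42 s into every minute. */
        uint64_t t = coalesce_minute(12300000, 95 * USEC_PER_SEC, 42 * USEC_PER_SEC);

        printf("wake up at %" PRIu64 " us\n", t);   /* prints 42000000, the shared spot */
        return 0;
}
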
    2367             : 
    2368      258615 : static int event_arm_timer(
    2369             :                 sd_event *e,
    2370             :                 struct clock_data *d) {
    2371             : 
    2372      258615 :         struct itimerspec its = {};
    2373             :         sd_event_source *a, *b;
    2374             :         usec_t t;
    2375             :         int r;
    2376             : 
    2377      258615 :         assert(e);
    2378      258615 :         assert(d);
    2379             : 
    2380      258615 :         if (!d->needs_rearm)
    2381      257170 :                 return 0;
    2382             :         else
    2383        1445 :                 d->needs_rearm = false;
    2384             : 
    2385        1445 :         a = prioq_peek(d->earliest);
    2386        1445 :         if (!a || a->enabled == SD_EVENT_OFF || a->time.next == USEC_INFINITY) {
    2387             : 
    2388        1358 :                 if (d->fd < 0)
    2389           0 :                         return 0;
    2390             : 
    2391        1358 :                 if (d->next == USEC_INFINITY)
    2392        1354 :                         return 0;
    2393             : 
    2394             :                 /* disarm */
    2395           4 :                 r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
    2396           4 :                 if (r < 0)
    2397           0 :                         return r;
    2398             : 
    2399           4 :                 d->next = USEC_INFINITY;
    2400           4 :                 return 0;
    2401             :         }
    2402             : 
    2403          87 :         b = prioq_peek(d->latest);
    2404          87 :         assert_se(b && b->enabled != SD_EVENT_OFF);
    2405             : 
    2406          87 :         t = sleep_between(e, a->time.next, time_event_source_latest(b));
    2407          87 :         if (d->next == t)
    2408          25 :                 return 0;
    2409             : 
    2410          62 :         assert_se(d->fd >= 0);
    2411             : 
    2412          62 :         if (t == 0) {
    2413             :                 /* We don't want to disarm here (an all-zero value would do that); instead, arm for some time looooong ago. */
    2414          34 :                 its.it_value.tv_sec = 0;
    2415          34 :                 its.it_value.tv_nsec = 1;
    2416             :         } else
    2417          28 :                 timespec_store(&its.it_value, t);
    2418             : 
    2419          62 :         r = timerfd_settime(d->fd, TFD_TIMER_ABSTIME, &its, NULL);
    2420          62 :         if (r < 0)
    2421           0 :                 return -errno;
    2422             : 
    2423          62 :         d->next = t;
    2424          62 :         return 0;
    2425             : }
    2426             : 
    2427      181965 : static int process_io(sd_event *e, sd_event_source *s, uint32_t revents) {
    2428      181965 :         assert(e);
    2429      181965 :         assert(s);
    2430      181965 :         assert(s->type == SOURCE_IO);
    2431             : 
    2432             :         /* If the event source was already pending, we just OR in the
    2433             :          * new revents, otherwise we reset the value. The ORing is
    2434             :          * necessary to handle EPOLLONESHOT events properly where
    2435             :          * readability might happen independently of writability, and
    2436             :          * we need to keep track of both */
    2437             : 
    2438      181965 :         if (s->pending)
    2439      180827 :                 s->io.revents |= revents;
    2440             :         else
    2441        1138 :                 s->io.revents = revents;
    2442             : 
    2443      181965 :         return source_set_pending(s, true);
    2444             : }
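
From the caller's side the OR-accumulation above is transparent: the callback simply receives whichever revents have been collected for the fd since it last ran. A minimal sketch of registering an IO source with the public API (the fd is assumed to be non-blocking and owned by the caller):

#include <errno.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <systemd/sd-event.h>

static int on_io(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        char buf[4096];
        ssize_t n;

        if (revents & EPOLLIN) {
                n = read(fd, buf, sizeof(buf));
                if (n < 0 && errno != EAGAIN)
                        return -errno;
        }

        if (revents & (EPOLLHUP|EPOLLERR))
                /* Peer gone or error: stop watching the fd. */
                return sd_event_source_set_enabled(s, SD_EVENT_OFF);

        return 0;
}

static int watch_fd(sd_event *e, int fd, sd_event_source **ret) {
        /* EPOLLHUP/EPOLLERR are reported even though only EPOLLIN is requested. */
        return sd_event_add_io(e, ret, fd, EPOLLIN, on_io, NULL);
}
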
    2445             : 
    2446          38 : static int flush_timer(sd_event *e, int fd, uint32_t events, usec_t *next) {
    2447             :         uint64_t x;
    2448             :         ssize_t ss;
    2449             : 
    2450          38 :         assert(e);
    2451          38 :         assert(fd >= 0);
    2452             : 
    2453          38 :         assert_return(events == EPOLLIN, -EIO);
    2454             : 
    2455          38 :         ss = read(fd, &x, sizeof(x));
    2456          38 :         if (ss < 0) {
    2457           0 :                 if (IN_SET(errno, EAGAIN, EINTR))
    2458           0 :                         return 0;
    2459             : 
    2460           0 :                 return -errno;
    2461             :         }
    2462             : 
    2463          38 :         if (_unlikely_(ss != sizeof(x)))
    2464           0 :                 return -EIO;
    2465             : 
    2466          38 :         if (next)
    2467          38 :                 *next = USEC_INFINITY;
    2468             : 
    2469          38 :         return 0;
    2470             : }
    2471             : 
    2472      258615 : static int process_timer(
    2473             :                 sd_event *e,
    2474             :                 usec_t n,
    2475             :                 struct clock_data *d) {
    2476             : 
    2477             :         sd_event_source *s;
    2478             :         int r;
    2479             : 
    2480      258615 :         assert(e);
    2481      258615 :         assert(d);
    2482             : 
    2483             :         for (;;) {
    2484      258653 :                 s = prioq_peek(d->earliest);
    2485      258653 :                 if (!s ||
    2486        2286 :                     s->time.next > n ||
    2487        2199 :                     s->enabled == SD_EVENT_OFF ||
    2488             :                     s->pending)
    2489             :                         break;
    2490             : 
    2491          38 :                 r = source_set_pending(s, true);
    2492          38 :                 if (r < 0)
    2493           0 :                         return r;
    2494             : 
    2495          38 :                 prioq_reshuffle(d->earliest, s, &s->time.earliest_index);
    2496          38 :                 prioq_reshuffle(d->latest, s, &s->time.latest_index);
    2497          38 :                 d->needs_rearm = true;
    2498             :         }
    2499             : 
    2500      258615 :         return 0;
    2501             : }
    2502             : 
    2503           2 : static int process_child(sd_event *e) {
    2504             :         sd_event_source *s;
    2505             :         Iterator i;
    2506             :         int r;
    2507             : 
    2508           2 :         assert(e);
    2509             : 
    2510           2 :         e->need_process_child = false;
    2511             : 
    2512             :         /*
    2513             :            So, this is ugly. We iteratively invoke waitid() with P_PID
    2514             :            + WNOHANG for each PID we wait for, instead of using
    2515             :            P_ALL. This is because we only want to get child
    2516             :            information of very specific child processes, and not all
    2517             :            of them. We might not have processed the SIGCHLD event of a
    2518             :            previous invocation and we don't want to maintain an
    2519             :            unbounded *per-child* event queue, hence we really don't
    2520             :            want anything flushed out of the kernel's queue that we
    2521             :            don't care about. Since this is O(n) this means that if you
    2522             :            have a lot of processes you probably want to handle SIGCHLD
    2523             :            yourself.
    2524             : 
    2525             :            We do not reap the children here (by using WNOWAIT); that
    2526             :            is only done after the event source is dispatched, so that
    2527             :            the callback still sees the process as a zombie.
    2528             :         */
    2529             : 
    2530           4 :         HASHMAP_FOREACH(s, e->child_sources, i) {
    2531           2 :                 assert(s->type == SOURCE_CHILD);
    2532             : 
    2533           2 :                 if (s->pending)
    2534           0 :                         continue;
    2535             : 
    2536           2 :                 if (s->enabled == SD_EVENT_OFF)
    2537           0 :                         continue;
    2538             : 
    2539           2 :                 zero(s->child.siginfo);
    2540           2 :                 r = waitid(P_PID, s->child.pid, &s->child.siginfo,
    2541           2 :                            WNOHANG | (s->child.options & WEXITED ? WNOWAIT : 0) | s->child.options);
    2542           2 :                 if (r < 0)
    2543           0 :                         return -errno;
    2544             : 
    2545           2 :                 if (s->child.siginfo.si_pid != 0) {
    2546           1 :                         bool zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
    2547             : 
    2548           1 :                         if (!zombie && (s->child.options & WEXITED)) {
    2549             :                                 /* If the child isn't dead then let's
    2550             :                                  * immediately remove the state change
    2551             :                                  * from the queue, since there's no
    2552             :                                  * benefit in leaving it queued */
    2553             : 
    2554           0 :                                 assert(s->child.options & (WSTOPPED|WCONTINUED));
    2555           0 :                                 waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|(s->child.options & (WSTOPPED|WCONTINUED)));
    2556             :                         }
    2557             : 
    2558           1 :                         r = source_set_pending(s, true);
    2559           1 :                         if (r < 0)
    2560           0 :                                 return r;
    2561             :                 }
    2562             :         }
    2563             : 
    2564           2 :         return 0;
    2565             : }
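
Seen from the caller, the contract implied above is: SIGCHLD must be blocked before the child source is added, and thanks to WNOWAIT the callback still observes the child as a zombie; sd-event reaps it right after the callback returns. A minimal sketch using only the public API (error handling omitted):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>
#include <systemd/sd-event.h>

static int on_child(sd_event_source *s, const siginfo_t *si, void *userdata) {
        if (si->si_code == CLD_EXITED)
                printf("child %d exited with status %d\n", (int) si->si_pid, si->si_status);

        /* Once we return, sd-event reaps the zombie; then ask the loop to stop. */
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

int main(void) {
        sd_event *e = NULL;
        sd_event_source *s = NULL;
        sigset_t mask;
        pid_t pid;

        /* SIGCHLD must be blocked (in all threads) before adding a child source. */
        sigemptyset(&mask);
        sigaddset(&mask, SIGCHLD);
        sigprocmask(SIG_BLOCK, &mask, NULL);

        pid = fork();
        if (pid == 0)
                _exit(7);

        sd_event_default(&e);
        sd_event_add_child(e, &s, pid, WEXITED, on_child, NULL);
        sd_event_loop(e);

        sd_event_source_unref(s);
        sd_event_unref(e);
        return 0;
}
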
    2566             : 
    2567           8 : static int process_signal(sd_event *e, struct signal_data *d, uint32_t events) {
    2568           8 :         bool read_one = false;
    2569             :         int r;
    2570             : 
    2571           8 :         assert(e);
    2572           8 :         assert(d);
    2573           8 :         assert_return(events == EPOLLIN, -EIO);
    2574             : 
    2575             :         /* If there's a signal queued on this priority and SIGCHLD is
    2576             :            on this priority too, then make sure to recheck the
    2577             :            children we watch. This is because we only ever dequeue
    2578             :            the first signal per priority, and if the one we dequeue
    2579             :            is not SIGCHLD we would not notice a SIGCHLD queued behind
    2580             :            it, even though there might be children we care about on
    2581             :            this priority, hence we need to check them explicitly. */
    2582             : 
    2583           8 :         if (sigismember(&d->sigset, SIGCHLD))
    2584           1 :                 e->need_process_child = true;
    2585             : 
    2586             :         /* If there's already an event source pending for this
    2587             :          * priority we don't read another */
    2588           8 :         if (d->current)
    2589           2 :                 return 0;
    2590             : 
    2591           1 :         for (;;) {
    2592             :                 struct signalfd_siginfo si;
    2593             :                 ssize_t n;
    2594           7 :                 sd_event_source *s = NULL;
    2595             : 
    2596           7 :                 n = read(d->fd, &si, sizeof(si));
    2597           7 :                 if (n < 0) {
    2598           1 :                         if (IN_SET(errno, EAGAIN, EINTR))
    2599           6 :                                 return read_one;
    2600             : 
    2601           0 :                         return -errno;
    2602             :                 }
    2603             : 
    2604           6 :                 if (_unlikely_(n != sizeof(si)))
    2605           0 :                         return -EIO;
    2606             : 
    2607           6 :                 assert(SIGNAL_VALID(si.ssi_signo));
    2608             : 
    2609           6 :                 read_one = true;
    2610             : 
    2611           6 :                 if (e->signal_sources)
    2612           6 :                         s = e->signal_sources[si.ssi_signo];
    2613           6 :                 if (!s)
    2614           1 :                         continue;
    2615           5 :                 if (s->pending)
    2616           0 :                         continue;
    2617             : 
    2618           5 :                 s->signal.siginfo = si;
    2619           5 :                 d->current = s;
    2620             : 
    2621           5 :                 r = source_set_pending(s, true);
    2622           5 :                 if (r < 0)
    2623           0 :                         return r;
    2624             : 
    2625           5 :                 return 1;
    2626             :         }
    2627             : }
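
The signalfd-based dequeuing above requires the caller to have blocked the signal beforehand; otherwise it would still be delivered the classic way and sd_event_add_signal() refuses to register it. A minimal sketch, assuming only the public API:

#include <signal.h>
#include <sys/signalfd.h>
#include <systemd/sd-event.h>

static int on_sigterm(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        /* Leave the loop cleanly when SIGTERM arrives. */
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

int main(void) {
        sd_event *e = NULL;
        sd_event_source *s = NULL;
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGTERM);
        sigprocmask(SIG_BLOCK, &mask, NULL);   /* must happen before sd_event_add_signal() */

        sd_event_default(&e);
        sd_event_add_signal(e, &s, SIGTERM, on_sigterm, NULL);
        sd_event_loop(e);

        sd_event_source_unref(s);
        sd_event_unref(e);
        return 0;
}
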
    2628             : 
    2629       82396 : static int event_inotify_data_read(sd_event *e, struct inotify_data *d, uint32_t revents) {
    2630             :         ssize_t n;
    2631             : 
    2632       82396 :         assert(e);
    2633       82396 :         assert(d);
    2634             : 
    2635       82396 :         assert_return(revents == EPOLLIN, -EIO);
    2636             : 
    2637             :         /* If there's already an event source pending for this priority, don't read another */
    2638       82396 :         if (d->n_pending > 0)
    2639       49448 :                 return 0;
    2640             : 
    2641             :         /* Is the read buffer non-empty? If so, let's not read more */
    2642       32948 :         if (d->buffer_filled > 0)
    2643       28826 :                 return 0;
    2644             : 
    2645        4122 :         n = read(d->fd, &d->buffer, sizeof(d->buffer));
    2646        4122 :         if (n < 0) {
    2647           0 :                 if (IN_SET(errno, EAGAIN, EINTR))
    2648           0 :                         return 0;
    2649             : 
    2650           0 :                 return -errno;
    2651             :         }
    2652             : 
    2653        4122 :         assert(n > 0);
    2654        4122 :         d->buffer_filled = (size_t) n;
    2655        4122 :         LIST_PREPEND(buffered, e->inotify_data_buffered, d);
    2656             : 
    2657        4122 :         return 1;
    2658             : }
    2659             : 
    2660       32975 : static void event_inotify_data_drop(sd_event *e, struct inotify_data *d, size_t sz) {
    2661       32975 :         assert(e);
    2662       32975 :         assert(d);
    2663       32975 :         assert(sz <= d->buffer_filled);
    2664             : 
    2665       32975 :         if (sz == 0)
    2666           0 :                 return;
    2667             : 
    2668             :         /* Move the rest to the buffer to the front, in order to get things properly aligned again */
    2669       32975 :         memmove(d->buffer.raw, d->buffer.raw + sz, d->buffer_filled - sz);
    2670       32975 :         d->buffer_filled -= sz;
    2671             : 
    2672       32975 :         if (d->buffer_filled == 0)
    2673        4121 :                 LIST_REMOVE(buffered, e->inotify_data_buffered, d);
    2674             : }
    2675             : 
    2676       82438 : static int event_inotify_data_process(sd_event *e, struct inotify_data *d) {
    2677             :         int r;
    2678             : 
    2679       82438 :         assert(e);
    2680       82438 :         assert(d);
    2681             : 
    2682             :         /* If there's already an event source pending for this priority, don't read another */
    2683       82438 :         if (d->n_pending > 0)
    2684       49463 :                 return 0;
    2685             : 
    2686       32975 :         while (d->buffer_filled > 0) {
    2687             :                 size_t sz;
    2688             : 
    2689             :                 /* Let's validate that the event structures are complete */
    2690       32975 :                 if (d->buffer_filled < offsetof(struct inotify_event, name))
    2691           0 :                         return -EIO;
    2692             : 
    2693       32975 :                 sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
    2694       32975 :                 if (d->buffer_filled < sz)
    2695           0 :                         return -EIO;
    2696             : 
    2697       32975 :                 if (d->buffer.ev.mask & IN_Q_OVERFLOW) {
    2698             :                         struct inode_data *inode_data;
    2699             :                         Iterator i;
    2700             : 
    2701             :                         /* The queue overran, let's pass this event to all event sources connected to this inotify
    2702             :                          * object */
    2703             : 
    2704           5 :                         HASHMAP_FOREACH(inode_data, d->inodes, i) {
    2705             :                                 sd_event_source *s;
    2706             : 
    2707           7 :                                 LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
    2708             : 
    2709           4 :                                         if (s->enabled == SD_EVENT_OFF)
    2710           0 :                                                 continue;
    2711             : 
    2712           4 :                                         r = source_set_pending(s, true);
    2713           4 :                                         if (r < 0)
    2714           0 :                                                 return r;
    2715             :                                 }
    2716             :                         }
    2717             :                 } else {
    2718             :                         struct inode_data *inode_data;
    2719             :                         sd_event_source *s;
    2720             : 
    2721             :                         /* Find the inode object for this watch descriptor. If IN_IGNORED is set we also remove it from
    2722             :                          * our watch descriptor table. */
    2723       32973 :                         if (d->buffer.ev.mask & IN_IGNORED) {
    2724             : 
    2725           1 :                                 inode_data = hashmap_remove(d->wd, INT_TO_PTR(d->buffer.ev.wd));
    2726           1 :                                 if (!inode_data) {
    2727           0 :                                         event_inotify_data_drop(e, d, sz);
    2728           0 :                                         continue;
    2729             :                                 }
    2730             : 
    2731             :                                 /* The watch descriptor was removed by the kernel, let's drop it here too */
    2732           1 :                                 inode_data->wd = -1;
    2733             :                         } else {
    2734       32972 :                                 inode_data = hashmap_get(d->wd, INT_TO_PTR(d->buffer.ev.wd));
    2735       32972 :                                 if (!inode_data) {
    2736           0 :                                         event_inotify_data_drop(e, d, sz);
    2737           0 :                                         continue;
    2738             :                                 }
    2739             :                         }
    2740             : 
    2741             :                         /* Trigger all event sources that are interested in these events. Also trigger all event
    2742             :                          * sources if IN_IGNORED or IN_UNMOUNT is set. */
    2743       82432 :                         LIST_FOREACH(inotify.by_inode_data, s, inode_data->event_sources) {
    2744             : 
    2745       49459 :                                 if (s->enabled == SD_EVENT_OFF)
    2746           0 :                                         continue;
    2747             : 
    2748       49459 :                                 if ((d->buffer.ev.mask & (IN_IGNORED|IN_UNMOUNT)) == 0 &&
    2749       49458 :                                     (s->inotify.mask & d->buffer.ev.mask & IN_ALL_EVENTS) == 0)
    2750           1 :                                         continue;
    2751             : 
    2752       49458 :                                 r = source_set_pending(s, true);
    2753       49458 :                                 if (r < 0)
    2754           0 :                                         return r;
    2755             :                         }
    2756             :                 }
    2757             : 
    2758             :                 /* Something pending now? If so, let's finish, otherwise let's read more. */
    2759       32975 :                 if (d->n_pending > 0)
    2760       32975 :                         return 1;
    2761             :         }
    2762             : 
    2763           0 :         return 0;
    2764             : }
    2765             : 
    2766       51723 : static int process_inotify(sd_event *e) {
    2767             :         struct inotify_data *d;
    2768       51723 :         int r, done = 0;
    2769             : 
    2770       51723 :         assert(e);
    2771             : 
    2772      134161 :         LIST_FOREACH(buffered, d, e->inotify_data_buffered) {
    2773       82438 :                 r = event_inotify_data_process(e, d);
    2774       82438 :                 if (r < 0)
    2775           0 :                         return r;
    2776       82438 :                 if (r > 0)
    2777       32975 :                         done ++;
    2778             :         }
    2779             : 
    2780       51723 :         return done;
    2781             : }
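
On the caller side each watch is a single call; the callback is handed the raw struct inotify_event, and as implemented above IN_Q_OVERFLOW is fanned out to every source on the inotify object (and IN_IGNORED/IN_UNMOUNT to every source on the affected inode), so overflow has to be handled explicitly. A minimal sketch using the public API:

#include <stdio.h>
#include <sys/inotify.h>
#include <systemd/sd-event.h>

static int on_inotify(sd_event_source *s, const struct inotify_event *ev, void *userdata) {
        if (ev->mask & IN_Q_OVERFLOW) {
                /* Events were lost; rescan the watched directory instead of trusting the stream. */
                fprintf(stderr, "inotify queue overflow, rescanning\n");
                return 0;
        }

        if (ev->len > 0)
                printf("mask=0x%x name=%s\n", (unsigned) ev->mask, ev->name);

        return 0;
}

static int watch_directory(sd_event *e, const char *path, sd_event_source **ret) {
        return sd_event_add_inotify(e, ret, path, IN_CREATE|IN_MOVED_TO|IN_DELETE, on_inotify, NULL);
}
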
    2782             : 
    2783       51859 : static int source_dispatch(sd_event_source *s) {
    2784             :         EventSourceType saved_type;
    2785       51859 :         int r = 0;
    2786             : 
    2787       51859 :         assert(s);
    2788       51859 :         assert(s->pending || s->type == SOURCE_EXIT);
    2789             : 
    2790             :         /* Save the event source type here, so that we still know it after the event callback, which might invalidate
    2791             :          * the event source. */
    2792       51859 :         saved_type = s->type;
    2793             : 
    2794       51859 :         if (!IN_SET(s->type, SOURCE_DEFER, SOURCE_EXIT)) {
    2795       50174 :                 r = source_set_pending(s, false);
    2796       50174 :                 if (r < 0)
    2797           0 :                         return r;
    2798             :         }
    2799             : 
    2800       51859 :         if (s->type != SOURCE_POST) {
    2801             :                 sd_event_source *z;
    2802             :                 Iterator i;
    2803             : 
    2804             :                 /* If we execute a non-post source, let's mark all
    2805             :                  * post sources as pending */
    2806             : 
    2807       51861 :                 SET_FOREACH(z, s->event->post_sources, i) {
    2808           5 :                         if (z->enabled == SD_EVENT_OFF)
    2809           0 :                                 continue;
    2810             : 
    2811           5 :                         r = source_set_pending(z, true);
    2812           5 :                         if (r < 0)
    2813           0 :                                 return r;
    2814             :                 }
    2815             :         }
    2816             : 
    2817       51859 :         if (s->enabled == SD_EVENT_ONESHOT) {
    2818         650 :                 r = sd_event_source_set_enabled(s, SD_EVENT_OFF);
    2819         650 :                 if (r < 0)
    2820           0 :                         return r;
    2821             :         }
    2822             : 
    2823       51859 :         s->dispatching = true;
    2824             : 
    2825       51859 :         switch (s->type) {
    2826             : 
    2827         666 :         case SOURCE_IO:
    2828         666 :                 r = s->io.callback(s, s->io.fd, s->io.revents, s->userdata);
    2829         666 :                 break;
    2830             : 
    2831          37 :         case SOURCE_TIME_REALTIME:
    2832             :         case SOURCE_TIME_BOOTTIME:
    2833             :         case SOURCE_TIME_MONOTONIC:
    2834             :         case SOURCE_TIME_REALTIME_ALARM:
    2835             :         case SOURCE_TIME_BOOTTIME_ALARM:
    2836          37 :                 r = s->time.callback(s, s->time.next, s->userdata);
    2837          37 :                 break;
    2838             : 
    2839           5 :         case SOURCE_SIGNAL:
    2840           5 :                 r = s->signal.callback(s, &s->signal.siginfo, s->userdata);
    2841           5 :                 break;
    2842             : 
    2843           1 :         case SOURCE_CHILD: {
    2844             :                 bool zombie;
    2845             : 
    2846           1 :                 zombie = IN_SET(s->child.siginfo.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED);
    2847             : 
    2848           1 :                 r = s->child.callback(s, &s->child.siginfo, s->userdata);
    2849             : 
    2850             :                 /* Now, reap the PID for good. */
    2851           1 :                 if (zombie)
    2852           1 :                         (void) waitid(P_PID, s->child.pid, &s->child.siginfo, WNOHANG|WEXITED);
    2853             : 
    2854           1 :                 break;
    2855             :         }
    2856             : 
    2857        1547 :         case SOURCE_DEFER:
    2858        1547 :                 r = s->defer.callback(s, s->userdata);
    2859        1547 :                 break;
    2860             : 
    2861           3 :         case SOURCE_POST:
    2862           3 :                 r = s->post.callback(s, s->userdata);
    2863           3 :                 break;
    2864             : 
    2865         138 :         case SOURCE_EXIT:
    2866         138 :                 r = s->exit.callback(s, s->userdata);
    2867         138 :                 break;
    2868             : 
    2869       49462 :         case SOURCE_INOTIFY: {
    2870       49462 :                 struct sd_event *e = s->event;
    2871             :                 struct inotify_data *d;
    2872             :                 size_t sz;
    2873             : 
    2874       49462 :                 assert(s->inotify.inode_data);
    2875       49462 :                 assert_se(d = s->inotify.inode_data->inotify_data);
    2876             : 
    2877       49462 :                 assert(d->buffer_filled >= offsetof(struct inotify_event, name));
    2878       49462 :                 sz = offsetof(struct inotify_event, name) + d->buffer.ev.len;
    2879       49462 :                 assert(d->buffer_filled >= sz);
    2880             : 
    2881       49462 :                 r = s->inotify.callback(s, &d->buffer.ev, s->userdata);
    2882             : 
    2883             :                 /* When no event is pending anymore on this inotify object, then let's drop the event from the
    2884             :                  * buffer. */
    2885       49462 :                 if (d->n_pending == 0)
    2886       32975 :                         event_inotify_data_drop(e, d, sz);
    2887             : 
    2888       49462 :                 break;
    2889             :         }
    2890             : 
    2891           0 :         case SOURCE_WATCHDOG:
    2892             :         case _SOURCE_EVENT_SOURCE_TYPE_MAX:
    2893             :         case _SOURCE_EVENT_SOURCE_TYPE_INVALID:
    2894           0 :                 assert_not_reached("Wut? I shouldn't exist.");
    2895             :         }
    2896             : 
    2897       51859 :         s->dispatching = false;
    2898             : 
    2899       51859 :         if (r < 0)
    2900           0 :                 log_debug_errno(r, "Event source %s (type %s) returned error, disabling: %m",
    2901             :                                 strna(s->description), event_source_type_to_string(saved_type));
    2902             : 
    2903       51859 :         if (s->n_ref == 0)
    2904         484 :                 source_free(s);
    2905       51375 :         else if (r < 0)
    2906           0 :                 sd_event_source_set_enabled(s, SD_EVENT_OFF);
    2907             : 
    2908       51859 :         return 1;
    2909             : }
    2910             : 
    2911       51723 : static int event_prepare(sd_event *e) {
    2912             :         int r;
    2913             : 
    2914       51723 :         assert(e);
    2915             : 
    2916      368064 :         for (;;) {
    2917             :                 sd_event_source *s;
    2918             : 
    2919      419787 :                 s = prioq_peek(e->prepare);
    2920      419787 :                 if (!s || s->prepare_iteration == e->iteration || s->enabled == SD_EVENT_OFF)
    2921             :                         break;
    2922             : 
    2923      368064 :                 s->prepare_iteration = e->iteration;
    2924      368064 :                 r = prioq_reshuffle(e->prepare, s, &s->prepare_index);
    2925      368064 :                 if (r < 0)
    2926           0 :                         return r;
    2927             : 
    2928      368064 :                 assert(s->prepare);
    2929             : 
    2930      368064 :                 s->dispatching = true;
    2931      368064 :                 r = s->prepare(s, s->userdata);
    2932      368064 :                 s->dispatching = false;
    2933             : 
    2934      368064 :                 if (r < 0)
    2935           0 :                         log_debug_errno(r, "Prepare callback of event source %s (type %s) returned error, disabling: %m",
    2936             :                                         strna(s->description), event_source_type_to_string(s->type));
    2937             : 
    2938      368064 :                 if (s->n_ref == 0)
    2939           0 :                         source_free(s);
    2940      368064 :                 else if (r < 0)
    2941           0 :                         sd_event_source_set_enabled(s, SD_EVENT_OFF);
    2942             :         }
    2943             : 
    2944       51723 :         return 0;
    2945             : }
    2946             : 
    2947         153 : static int dispatch_exit(sd_event *e) {
    2948             :         sd_event_source *p;
    2949         153 :         _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
    2950             :         int r;
    2951             : 
    2952         153 :         assert(e);
    2953             : 
    2954         153 :         p = prioq_peek(e->exit);
    2955         153 :         if (!p || p->enabled == SD_EVENT_OFF) {
    2956          15 :                 e->state = SD_EVENT_FINISHED;
    2957          15 :                 return 0;
    2958             :         }
    2959             : 
    2960         138 :         ref = sd_event_ref(e);
    2961         138 :         e->iteration++;
    2962         138 :         e->state = SD_EVENT_EXITING;
    2963         138 :         r = source_dispatch(p);
    2964         138 :         e->state = SD_EVENT_INITIAL;
    2965         138 :         return r;
    2966             : }
    2967             : 
    2968      155167 : static sd_event_source* event_next_pending(sd_event *e) {
    2969             :         sd_event_source *p;
    2970             : 
    2971      155167 :         assert(e);
    2972             : 
    2973      155167 :         p = prioq_peek(e->pending);
    2974      155167 :         if (!p)
    2975       16576 :                 return NULL;
    2976             : 
    2977      138591 :         if (p->enabled == SD_EVENT_OFF)
    2978          26 :                 return NULL;
    2979             : 
    2980      138565 :         return p;
    2981             : }
    2982             : 
    2983           0 : static int arm_watchdog(sd_event *e) {
    2984           0 :         struct itimerspec its = {};
    2985             :         usec_t t;
    2986             :         int r;
    2987             : 
    2988           0 :         assert(e);
    2989           0 :         assert(e->watchdog_fd >= 0);
    2990             : 
    2991           0 :         t = sleep_between(e,
    2992           0 :                           e->watchdog_last + (e->watchdog_period / 2),
    2993           0 :                           e->watchdog_last + (e->watchdog_period * 3 / 4));
    2994             : 
    2995           0 :         timespec_store(&its.it_value, t);
    2996             : 
    2997             :         /* Make sure we never set the watchdog to 0, which tells the
    2998             :          * kernel to disable it. */
    2999           0 :         if (its.it_value.tv_sec == 0 && its.it_value.tv_nsec == 0)
    3000           0 :                 its.it_value.tv_nsec = 1;
    3001             : 
    3002           0 :         r = timerfd_settime(e->watchdog_fd, TFD_TIMER_ABSTIME, &its, NULL);
    3003           0 :         if (r < 0)
    3004           0 :                 return -errno;
    3005             : 
    3006           0 :         return 0;
    3007             : }
    3008             : 
    3009       51723 : static int process_watchdog(sd_event *e) {
    3010       51723 :         assert(e);
    3011             : 
    3012       51723 :         if (!e->watchdog)
    3013       51723 :                 return 0;
    3014             : 
    3015             :         /* Don't notify watchdog too often */
    3016           0 :         if (e->watchdog_last + e->watchdog_period / 4 > e->timestamp.monotonic)
    3017           0 :                 return 0;
    3018             : 
    3019           0 :         sd_notify(false, "WATCHDOG=1");
    3020           0 :         e->watchdog_last = e->timestamp.monotonic;
    3021             : 
    3022           0 :         return arm_watchdog(e);
    3023             : }
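
Hooking an application into this path takes one call: sd_event_set_watchdog() reads WATCHDOG_USEC (set by the service manager when WatchdogSec= is configured) and, if present, makes the loop send WATCHDOG=1 as above; without the variable the call is a no-op that still returns successfully. A minimal sketch:

#include <systemd/sd-event.h>

int run_with_watchdog(void) {
        sd_event *e = NULL;
        int r;

        r = sd_event_default(&e);
        if (r < 0)
                return r;

        /* Enables the periodic WATCHDOG=1 notification if $WATCHDOG_USEC is set. */
        r = sd_event_set_watchdog(e, 1);
        if (r >= 0)
                r = sd_event_loop(e);

        sd_event_unref(e);
        return r;
}
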
    3024             : 
    3025       51723 : static void event_close_inode_data_fds(sd_event *e) {
    3026             :         struct inode_data *d;
    3027             : 
    3028       51723 :         assert(e);
    3029             : 
    3030             :         /* Close the fds pointing to the inodes to watch now. We need to close them as they might otherwise pin
    3031             :          * filesystems. But we can't close them right away, as we need them as long as the user still wants to make
    3032             :          * adjustments to the event source, such as changing the priority (which requires us to remove and re-add a watch
    3033             :          * for the inode). Hence, let's close them when entering the first iteration after they were added, as a
    3034             :          * compromise. */
    3035             : 
    3036       51729 :         while ((d = e->inode_data_to_close)) {
    3037           6 :                 assert(d->fd >= 0);
    3038           6 :                 d->fd = safe_close(d->fd);
    3039             : 
    3040           6 :                 LIST_REMOVE(to_close, e->inode_data_to_close, d);
    3041             :         }
    3042       51723 : }
    3043             : 
    3044       51876 : _public_ int sd_event_prepare(sd_event *e) {
    3045             :         int r;
    3046             : 
    3047       51876 :         assert_return(e, -EINVAL);
    3048       51876 :         assert_return(e = event_resolve(e), -ENOPKG);
    3049       51876 :         assert_return(!event_pid_changed(e), -ECHILD);
    3050       51876 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    3051       51876 :         assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
    3052             : 
    3053       51876 :         if (e->exit_requested)
    3054         153 :                 goto pending;
    3055             : 
    3056       51723 :         e->iteration++;
    3057             : 
    3058       51723 :         e->state = SD_EVENT_PREPARING;
    3059       51723 :         r = event_prepare(e);
    3060       51723 :         e->state = SD_EVENT_INITIAL;
    3061       51723 :         if (r < 0)
    3062           0 :                 return r;
    3063             : 
    3064       51723 :         r = event_arm_timer(e, &e->realtime);
    3065       51723 :         if (r < 0)
    3066           0 :                 return r;
    3067             : 
    3068       51723 :         r = event_arm_timer(e, &e->boottime);
    3069       51723 :         if (r < 0)
    3070           0 :                 return r;
    3071             : 
    3072       51723 :         r = event_arm_timer(e, &e->monotonic);
    3073       51723 :         if (r < 0)
    3074           0 :                 return r;
    3075             : 
    3076       51723 :         r = event_arm_timer(e, &e->realtime_alarm);
    3077       51723 :         if (r < 0)
    3078           0 :                 return r;
    3079             : 
    3080       51723 :         r = event_arm_timer(e, &e->boottime_alarm);
    3081       51723 :         if (r < 0)
    3082           0 :                 return r;
    3083             : 
    3084       51723 :         event_close_inode_data_fds(e);
    3085             : 
    3086       51723 :         if (event_next_pending(e) || e->need_process_child)
    3087       35123 :                 goto pending;
    3088             : 
    3089       16600 :         e->state = SD_EVENT_ARMED;
    3090             : 
    3091       16600 :         return 0;
    3092             : 
    3093       35276 : pending:
    3094       35276 :         e->state = SD_EVENT_ARMED;
    3095       35276 :         r = sd_event_wait(e, 0);
    3096       35276 :         if (r == 0)
    3097           0 :                 e->state = SD_EVENT_ARMED;
    3098             : 
    3099       35276 :         return r;
    3100             : }
    3101             : 
    3102       51876 : _public_ int sd_event_wait(sd_event *e, uint64_t timeout) {
    3103             :         struct epoll_event *ev_queue;
    3104             :         unsigned ev_queue_max;
    3105             :         int r, m, i;
    3106             : 
    3107       51876 :         assert_return(e, -EINVAL);
    3108       51876 :         assert_return(e = event_resolve(e), -ENOPKG);
    3109       51876 :         assert_return(!event_pid_changed(e), -ECHILD);
    3110       51876 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    3111       51876 :         assert_return(e->state == SD_EVENT_ARMED, -EBUSY);
    3112             : 
    3113       51876 :         if (e->exit_requested) {
    3114         153 :                 e->state = SD_EVENT_PENDING;
    3115         153 :                 return 1;
    3116             :         }
    3117             : 
    3118       51723 :         ev_queue_max = MAX(e->n_sources, 1u);
    3119       51723 :         ev_queue = newa(struct epoll_event, ev_queue_max);
    3120             : 
    3121             :         /* If we still have inotify data buffered, then query the other fds, but don't wait on it */
    3122       51723 :         if (e->inotify_data_buffered)
    3123       47401 :                 timeout = 0;
    3124             : 
    3125      101283 :         m = epoll_wait(e->epoll_fd, ev_queue, ev_queue_max,
    3126       49560 :                        timeout == (uint64_t) -1 ? -1 : (int) DIV_ROUND_UP(timeout, USEC_PER_MSEC));
    3127       51723 :         if (m < 0) {
    3128           0 :                 if (errno == EINTR) {
    3129           0 :                         e->state = SD_EVENT_PENDING;
    3130           0 :                         return 1;
    3131             :                 }
    3132             : 
    3133           0 :                 r = -errno;
    3134           0 :                 goto finish;
    3135             :         }
    3136             : 
    3137       51723 :         triple_timestamp_get(&e->timestamp);
    3138             : 
    3139      316130 :         for (i = 0; i < m; i++) {
    3140             : 
    3141      264407 :                 if (ev_queue[i].data.ptr == INT_TO_PTR(SOURCE_WATCHDOG))
    3142           0 :                         r = flush_timer(e, e->watchdog_fd, ev_queue[i].events, NULL);
    3143             :                 else {
    3144      264407 :                         WakeupType *t = ev_queue[i].data.ptr;
    3145             : 
    3146      264407 :                         switch (*t) {
    3147             : 
    3148      181965 :                         case WAKEUP_EVENT_SOURCE:
    3149      181965 :                                 r = process_io(e, ev_queue[i].data.ptr, ev_queue[i].events);
    3150      181965 :                                 break;
    3151             : 
    3152          38 :                         case WAKEUP_CLOCK_DATA: {
    3153          38 :                                 struct clock_data *d = ev_queue[i].data.ptr;
    3154          38 :                                 r = flush_timer(e, d->fd, ev_queue[i].events, &d->next);
    3155          38 :                                 break;
    3156             :                         }
    3157             : 
    3158           8 :                         case WAKEUP_SIGNAL_DATA:
    3159           8 :                                 r = process_signal(e, ev_queue[i].data.ptr, ev_queue[i].events);
    3160           8 :                                 break;
    3161             : 
    3162       82396 :                         case WAKEUP_INOTIFY_DATA:
    3163       82396 :                                 r = event_inotify_data_read(e, ev_queue[i].data.ptr, ev_queue[i].events);
    3164       82396 :                                 break;
    3165             : 
    3166           0 :                         default:
    3167           0 :                                 assert_not_reached("Invalid wake-up pointer");
    3168             :                         }
    3169             :                 }
    3170      264407 :                 if (r < 0)
    3171           0 :                         goto finish;
    3172             :         }
    3173             : 
    3174       51723 :         r = process_watchdog(e);
    3175       51723 :         if (r < 0)
    3176           0 :                 goto finish;
    3177             : 
    3178       51723 :         r = process_timer(e, e->timestamp.realtime, &e->realtime);
    3179       51723 :         if (r < 0)
    3180           0 :                 goto finish;
    3181             : 
    3182       51723 :         r = process_timer(e, e->timestamp.boottime, &e->boottime);
    3183       51723 :         if (r < 0)
    3184           0 :                 goto finish;
    3185             : 
    3186       51723 :         r = process_timer(e, e->timestamp.monotonic, &e->monotonic);
    3187       51723 :         if (r < 0)
    3188           0 :                 goto finish;
    3189             : 
    3190       51723 :         r = process_timer(e, e->timestamp.realtime, &e->realtime_alarm);
    3191       51723 :         if (r < 0)
    3192           0 :                 goto finish;
    3193             : 
    3194       51723 :         r = process_timer(e, e->timestamp.boottime, &e->boottime_alarm);
    3195       51723 :         if (r < 0)
    3196           0 :                 goto finish;
    3197             : 
    3198       51723 :         if (e->need_process_child) {
    3199           2 :                 r = process_child(e);
    3200           2 :                 if (r < 0)
    3201           0 :                         goto finish;
    3202             :         }
    3203             : 
    3204       51723 :         r = process_inotify(e);
    3205       51723 :         if (r < 0)
    3206           0 :                 goto finish;
    3207             : 
    3208       51723 :         if (event_next_pending(e)) {
    3209       51721 :                 e->state = SD_EVENT_PENDING;
    3210             : 
    3211       51721 :                 return 1;
    3212             :         }
    3213             : 
    3214           2 :         r = 0;
    3215             : 
    3216           2 : finish:
    3217           2 :         e->state = SD_EVENT_INITIAL;
    3218             : 
    3219           2 :         return r;
    3220             : }
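
/* A standalone sketch of the demultiplexing scheme used in the epoll loop above,
 * not part of sd-event.c: every registered object stores a pointer to itself in
 * epoll_event.data.ptr, and each object begins with a type tag, so one wait loop
 * can recover the concrete type before dispatching. The names below (wakeup_kind,
 * my_io, my_timer) are illustrative stand-ins, not the sd-event internals. */

#include <assert.h>
#include <stdio.h>

typedef enum wakeup_kind {
        WAKEUP_KIND_IO,
        WAKEUP_KIND_TIMER,
} wakeup_kind;

typedef struct my_io {
        wakeup_kind kind;   /* must be the first member */
        int fd;
} my_io;

typedef struct my_timer {
        wakeup_kind kind;   /* must be the first member */
        unsigned long long next_usec;
} my_timer;

static void dispatch_wakeup(void *ptr) {
        /* Peek at the common first member to learn the concrete type. */
        switch (*(wakeup_kind*) ptr) {

        case WAKEUP_KIND_IO:
                printf("io event on fd %d\n", ((my_io*) ptr)->fd);
                break;

        case WAKEUP_KIND_TIMER:
                printf("timer elapses at %llu\n", ((my_timer*) ptr)->next_usec);
                break;

        default:
                assert(!"unexpected wakeup kind");
        }
}

int main(void) {
        my_io io = { .kind = WAKEUP_KIND_IO, .fd = 3 };
        my_timer t = { .kind = WAKEUP_KIND_TIMER, .next_usec = 1000000 };

        dispatch_wakeup(&io);
        dispatch_wakeup(&t);
        return 0;
}
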
    3221             : 
    3222       51874 : _public_ int sd_event_dispatch(sd_event *e) {
    3223             :         sd_event_source *p;
    3224             :         int r;
    3225             : 
    3226       51874 :         assert_return(e, -EINVAL);
    3227       51874 :         assert_return(e = event_resolve(e), -ENOPKG);
    3228       51874 :         assert_return(!event_pid_changed(e), -ECHILD);
    3229       51874 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    3230       51874 :         assert_return(e->state == SD_EVENT_PENDING, -EBUSY);
    3231             : 
    3232       51874 :         if (e->exit_requested)
    3233         153 :                 return dispatch_exit(e);
    3234             : 
    3235       51721 :         p = event_next_pending(e);
    3236       51721 :         if (p) {
    3237      103442 :                 _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
    3238             : 
    3239       51721 :                 ref = sd_event_ref(e);
    3240       51721 :                 e->state = SD_EVENT_RUNNING;
    3241       51721 :                 r = source_dispatch(p);
    3242       51721 :                 e->state = SD_EVENT_INITIAL;
    3243       51721 :                 return r;
    3244             :         }
    3245             : 
    3246           0 :         e->state = SD_EVENT_INITIAL;
    3247             : 
    3248           0 :         return 1;
    3249             : }
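
/* A minimal sketch, not part of sd-event.c, of driving the three loop phases by
 * hand with the public API that sd_event_run() below wraps: prepare, wait only if
 * nothing was already pending, then dispatch one source. Error handling is
 * abbreviated; <systemd/sd-event.h> is the installed public header. */

#include <systemd/sd-event.h>

static int run_one_iteration(sd_event *e, uint64_t timeout_usec) {
        int r;

        r = sd_event_prepare(e);                     /* > 0 if a source is already pending */
        if (r < 0)
                return r;

        if (r == 0) {
                r = sd_event_wait(e, timeout_usec);  /* > 0 if something became pending */
                if (r <= 0)
                        return r;                    /* error, or timeout with nothing to do */
        }

        return sd_event_dispatch(e);                 /* runs one pending source's handler */
}
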
    3250             : 
    3251           0 : static void event_log_delays(sd_event *e) {
    3252             :         char b[ELEMENTSOF(e->delays) * DECIMAL_STR_MAX(unsigned) + 1], *p;
    3253             :         size_t l, i;
    3254             : 
    3255           0 :         p = b;
    3256           0 :         l = sizeof(b);
    3257           0 :         for (i = 0; i < ELEMENTSOF(e->delays); i++) {
    3258           0 :                 l = strpcpyf(&p, l, "%u ", e->delays[i]);
    3259           0 :                 e->delays[i] = 0;
    3260             :         }
    3261           0 :         log_debug("Event loop iterations: %s", b);
    3262           0 : }
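
/* A small self-contained sketch of the power-of-two bucketing behind the
 * profile_delays histogram logged by event_log_delays() above and filled in by
 * sd_event_run() below: each inter-iteration delay lands in bucket
 * floor(log2(delay)), giving a coarse latency histogram. The log2 helper below is
 * reimplemented for illustration and is not the systemd-internal u64log2(); the
 * sample delays are made up. */

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static unsigned log2_u64(uint64_t u) {
        unsigned n = 0;
        while (u > 1) {
                u >>= 1;
                n++;
        }
        return n;
}

int main(void) {
        unsigned buckets[64] = {0};
        const uint64_t delays_usec[] = { 3, 90, 1500, 1800, 250000 };

        for (size_t i = 0; i < sizeof(delays_usec) / sizeof(delays_usec[0]); i++)
                buckets[log2_u64(delays_usec[i])]++;

        /* Print only the non-empty buckets, e.g. "bucket 10 (>= 1024 us): 2". */
        for (unsigned b = 0; b < 64; b++)
                if (buckets[b] > 0)
                        printf("bucket %u (>= %llu us): %u\n",
                               b, (unsigned long long) (UINT64_C(1) << b), buckets[b]);
        return 0;
}
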
    3263             : 
    3264       51876 : _public_ int sd_event_run(sd_event *e, uint64_t timeout) {
    3265             :         int r;
    3266             : 
    3267       51876 :         assert_return(e, -EINVAL);
    3268       51876 :         assert_return(e = event_resolve(e), -ENOPKG);
    3269       51876 :         assert_return(!event_pid_changed(e), -ECHILD);
    3270       51876 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    3271       51876 :         assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
    3272             : 
    3273       51876 :         if (e->profile_delays && e->last_run) {
    3274             :                 usec_t this_run;
    3275             :                 unsigned l;
    3276             : 
    3277           0 :                 this_run = now(CLOCK_MONOTONIC);
    3278             : 
    3279           0 :                 l = u64log2(this_run - e->last_run);
    3280           0 :                 assert(l < sizeof(e->delays));
    3281           0 :                 e->delays[l]++;
    3282             : 
    3283           0 :                 if (this_run - e->last_log >= 5*USEC_PER_SEC) {
    3284           0 :                         event_log_delays(e);
    3285           0 :                         e->last_log = this_run;
    3286             :                 }
    3287             :         }
    3288             : 
    3289       51876 :         r = sd_event_prepare(e);
    3290       51876 :         if (r == 0)
    3291             :                 /* There was nothing? Then wait... */
    3292       16600 :                 r = sd_event_wait(e, timeout);
    3293             : 
    3294       51876 :         if (e->profile_delays)
    3295           0 :                 e->last_run = now(CLOCK_MONOTONIC);
    3296             : 
    3297       51876 :         if (r > 0) {
    3298             :                 /* There's something now, so let's dispatch it */
    3299       51874 :                 r = sd_event_dispatch(e);
    3300       51874 :                 if (r < 0)
    3301           0 :                         return r;
    3302             : 
    3303       51874 :                 return 1;
    3304             :         }
    3305             : 
    3306           2 :         return r;
    3307             : }
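
/* A sketch, not part of sd-event.c, of calling sd_event_run() directly instead of
 * sd_event_loop(), e.g. to interleave event processing with unrelated periodic
 * work. do_other_work() is a hypothetical placeholder supplied elsewhere. */

#include <systemd/sd-event.h>

void do_other_work(void);                            /* hypothetical, defined elsewhere */

static int run_interleaved(sd_event *e) {
        int r;

        while (sd_event_get_state(e) != SD_EVENT_FINISHED) {
                /* Block for at most 100 ms per iteration, then do other work. */
                r = sd_event_run(e, 100 * 1000ULL);
                if (r < 0)
                        return r;

                do_other_work();
        }

        return 0;
}
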
    3308             : 
    3309          15 : _public_ int sd_event_loop(sd_event *e) {
    3310          15 :         _cleanup_(sd_event_unrefp) sd_event *ref = NULL;
    3311             :         int r;
    3312             : 
    3313          15 :         assert_return(e, -EINVAL);
    3314          15 :         assert_return(e = event_resolve(e), -ENOPKG);
    3315          15 :         assert_return(!event_pid_changed(e), -ECHILD);
    3316          15 :         assert_return(e->state == SD_EVENT_INITIAL, -EBUSY);
    3317             : 
    3318          15 :         ref = sd_event_ref(e);
    3319             : 
    3320       51838 :         while (e->state != SD_EVENT_FINISHED) {
    3321       51823 :                 r = sd_event_run(e, (uint64_t) -1);
    3322       51823 :                 if (r < 0)
    3323           0 :                         return r;
    3324             :         }
    3325             : 
    3326          15 :         return e->exit_code;
    3327             : }
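
/* A minimal complete consumer sketch, not part of sd-event.c: acquire the
 * per-thread default loop, add a defer source that requests termination, and let
 * sd_event_loop() return the code passed to sd_event_exit(). Assumes the usual
 * public API from <systemd/sd-event.h>, including sd_event_add_defer(), which is
 * not shown in this excerpt. */

#include <systemd/sd-event.h>

static int quit_handler(sd_event_source *s, void *userdata) {
        /* Ask the owning loop to terminate with exit code 0. */
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

int main(void) {
        sd_event *e = NULL;
        sd_event_source *s = NULL;
        int r;

        r = sd_event_default(&e);
        if (r < 0)
                return 1;

        r = sd_event_add_defer(e, &s, quit_handler, NULL);
        if (r < 0) {
                sd_event_unref(e);
                return 1;
        }

        r = sd_event_loop(e);                /* returns the exit code, here 0 */

        sd_event_source_unref(s);
        sd_event_unref(e);
        return r < 0 ? 1 : r;
}
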
    3328             : 
    3329           0 : _public_ int sd_event_get_fd(sd_event *e) {
    3330             : 
    3331           0 :         assert_return(e, -EINVAL);
    3332           0 :         assert_return(e = event_resolve(e), -ENOPKG);
    3333           0 :         assert_return(!event_pid_changed(e), -ECHILD);
    3334             : 
    3335           0 :         return e->epoll_fd;
    3336             : }
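
/* A sketch, not part of sd-event.c, of embedding the loop in a foreign
 * poll()-based loop via sd_event_get_fd(): wait until the epoll fd becomes
 * readable, then let sd-event process whatever is ready without blocking by
 * running one iteration with a zero timeout. */

#include <errno.h>
#include <poll.h>
#include <systemd/sd-event.h>

static int pump_from_foreign_loop(sd_event *e) {
        struct pollfd pfd = {
                .fd = sd_event_get_fd(e),
                .events = POLLIN,
        };
        int r;

        while (sd_event_get_state(e) != SD_EVENT_FINISHED) {
                if (poll(&pfd, 1, -1) < 0)
                        return -errno;

                /* Something is ready; process it without blocking. */
                r = sd_event_run(e, 0);
                if (r < 0)
                        return r;
        }

        return 0;
}
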
    3337             : 
    3338           0 : _public_ int sd_event_get_state(sd_event *e) {
    3339           0 :         assert_return(e, -EINVAL);
    3340           0 :         assert_return(e = event_resolve(e), -ENOPKG);
    3341           0 :         assert_return(!event_pid_changed(e), -ECHILD);
    3342             : 
    3343           0 :         return e->state;
    3344             : }
    3345             : 
    3346           3 : _public_ int sd_event_get_exit_code(sd_event *e, int *code) {
    3347           3 :         assert_return(e, -EINVAL);
    3348           3 :         assert_return(e = event_resolve(e), -ENOPKG);
    3349           3 :         assert_return(code, -EINVAL);
    3350           3 :         assert_return(!event_pid_changed(e), -ECHILD);
    3351             : 
    3352           3 :         if (!e->exit_requested)
    3353           0 :                 return -ENODATA;
    3354             : 
    3355           3 :         *code = e->exit_code;
    3356           3 :         return 0;
    3357             : }
    3358             : 
    3359          15 : _public_ int sd_event_exit(sd_event *e, int code) {
    3360          15 :         assert_return(e, -EINVAL);
    3361          15 :         assert_return(e = event_resolve(e), -ENOPKG);
    3362          15 :         assert_return(e->state != SD_EVENT_FINISHED, -ESTALE);
    3363          15 :         assert_return(!event_pid_changed(e), -ECHILD);
    3364             : 
    3365          15 :         e->exit_requested = true;
    3366          15 :         e->exit_code = code;
    3367             : 
    3368          15 :         return 0;
    3369             : }
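
/* A sketch, not part of sd-event.c, of the typical sd_event_exit() caller: a
 * SIGINT source that terminates the loop cleanly. The signal has to be blocked
 * before sd_event_add_signal() (not shown in this excerpt) can take it over via a
 * signalfd. */

#include <errno.h>
#include <signal.h>
#include <sys/signalfd.h>
#include <systemd/sd-event.h>

static int sigint_handler(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
        /* Request loop termination with exit code 0 on Ctrl-C. */
        return sd_event_exit(sd_event_source_get_event(s), 0);
}

static int setup_sigint(sd_event *e, sd_event_source **ret) {
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);
        if (sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
                return -errno;

        return sd_event_add_signal(e, ret, SIGINT, sigint_handler, NULL);
}
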
    3370             : 
    3371          58 : _public_ int sd_event_now(sd_event *e, clockid_t clock, uint64_t *usec) {
    3372          58 :         assert_return(e, -EINVAL);
    3373          58 :         assert_return(e = event_resolve(e), -ENOPKG);
    3374          58 :         assert_return(usec, -EINVAL);
    3375          58 :         assert_return(!event_pid_changed(e), -ECHILD);
    3376             : 
    3377          58 :         if (!TRIPLE_TIMESTAMP_HAS_CLOCK(clock))
    3378           4 :                 return -EOPNOTSUPP;
    3379             : 
    3380             :         /* Generate a clean error in case CLOCK_BOOTTIME is not available. Note that we don't use clock_supported()
    3381             :          * here, for a reason: there are systems where CLOCK_BOOTTIME is supported but CLOCK_BOOTTIME_ALARM is not,
    3382             :          * yet for the purpose of getting the time this doesn't matter. */
    3383          54 :         if (IN_SET(clock, CLOCK_BOOTTIME, CLOCK_BOOTTIME_ALARM) && !clock_boottime_supported())
    3384           0 :                 return -EOPNOTSUPP;
    3385             : 
    3386          54 :         if (!triple_timestamp_is_set(&e->timestamp)) {
    3387             :                 /* Implicitly fall back to now() if we never ran
    3388             :                  * before and thus have no cached time. */
    3389          11 :                 *usec = now(clock);
    3390          11 :                 return 1;
    3391             :         }
    3392             : 
    3393          43 :         *usec = triple_timestamp_by_clock(&e->timestamp, clock);
    3394          43 :         return 0;
    3395             : }
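
/* A sketch, not part of sd-event.c, of the usual sd_event_now() pattern: compute
 * an absolute CLOCK_MONOTONIC deadline relative to the loop's cached iteration
 * timestamp and arm a one-shot timer with it via sd_event_add_time() (not shown
 * in this excerpt). Passing 0 as accuracy selects the loop's default accuracy. */

#include <stdint.h>
#include <time.h>
#include <systemd/sd-event.h>

static int timer_handler(sd_event_source *s, uint64_t usec, void *userdata) {
        /* Time sources are one-shot by default; nothing to re-arm here. */
        return 0;
}

static int arm_timer_in(sd_event *e, uint64_t delay_usec, sd_event_source **ret) {
        uint64_t now_usec;
        int r;

        r = sd_event_now(e, CLOCK_MONOTONIC, &now_usec);
        if (r < 0)
                return r;

        return sd_event_add_time(e, ret, CLOCK_MONOTONIC,
                                 now_usec + delay_usec, 0,
                                 timer_handler, NULL);
}
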
    3396             : 
    3397          25 : _public_ int sd_event_default(sd_event **ret) {
    3398          25 :         sd_event *e = NULL;
    3399             :         int r;
    3400             : 
    3401          25 :         if (!ret)
    3402           0 :                 return !!default_event;
    3403             : 
    3404          25 :         if (default_event) {
    3405           0 :                 *ret = sd_event_ref(default_event);
    3406           0 :                 return 0;
    3407             :         }
    3408             : 
    3409          25 :         r = sd_event_new(&e);
    3410          25 :         if (r < 0)
    3411           0 :                 return r;
    3412             : 
    3413          25 :         e->default_event_ptr = &default_event;
    3414          25 :         e->tid = gettid();
    3415          25 :         default_event = e;
    3416             : 
    3417          25 :         *ret = e;
    3418          25 :         return 1;
    3419             : }
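
/* A brief sketch, not part of sd-event.c, of the sd_event_default() reference
 * semantics shown above: each successful call hands the calling thread a
 * reference to its default loop (creating it on first use), and the caller must
 * drop that reference with sd_event_unref() when done. */

#include <systemd/sd-event.h>

static int with_default_loop(void) {
        sd_event *e = NULL;
        int r;

        r = sd_event_default(&e);    /* 1 if newly created, 0 if it already existed */
        if (r < 0)
                return r;

        /* ... attach event sources and run the loop here ... */

        sd_event_unref(e);           /* drop the reference obtained above */
        return 0;
}
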
    3420             : 
    3421           0 : _public_ int sd_event_get_tid(sd_event *e, pid_t *tid) {
    3422           0 :         assert_return(e, -EINVAL);
    3423           0 :         assert_return(e = event_resolve(e), -ENOPKG);
    3424           0 :         assert_return(tid, -EINVAL);
    3425           0 :         assert_return(!event_pid_changed(e), -ECHILD);
    3426             : 
    3427           0 :         if (e->tid != 0) {
    3428           0 :                 *tid = e->tid;
    3429           0 :                 return 0;
    3430             :         }
    3431             : 
    3432           0 :         return -ENXIO;
    3433             : }
    3434             : 
    3435           3 : _public_ int sd_event_set_watchdog(sd_event *e, int b) {
    3436             :         int r;
    3437             : 
    3438           3 :         assert_return(e, -EINVAL);
    3439           3 :         assert_return(e = event_resolve(e), -ENOPKG);
    3440           3 :         assert_return(!event_pid_changed(e), -ECHILD);
    3441             : 
    3442           3 :         if (e->watchdog == !!b)
    3443           0 :                 return e->watchdog;
    3444             : 
    3445           3 :         if (b) {
    3446             :                 struct epoll_event ev;
    3447             : 
    3448           3 :                 r = sd_watchdog_enabled(false, &e->watchdog_period);
    3449           3 :                 if (r <= 0)
    3450           3 :                         return r;
    3451             : 
    3452             :                 /* Issue first ping immediately */
    3453           0 :                 sd_notify(false, "WATCHDOG=1");
    3454           0 :                 e->watchdog_last = now(CLOCK_MONOTONIC);
    3455             : 
    3456           0 :                 e->watchdog_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
    3457           0 :                 if (e->watchdog_fd < 0)
    3458           0 :                         return -errno;
    3459             : 
    3460           0 :                 r = arm_watchdog(e);
    3461           0 :                 if (r < 0)
    3462           0 :                         goto fail;
    3463             : 
    3464           0 :                 ev = (struct epoll_event) {
    3465             :                         .events = EPOLLIN,
    3466             :                         .data.ptr = INT_TO_PTR(SOURCE_WATCHDOG),
    3467             :                 };
    3468             : 
    3469           0 :                 r = epoll_ctl(e->epoll_fd, EPOLL_CTL_ADD, e->watchdog_fd, &ev);
    3470           0 :                 if (r < 0) {
    3471           0 :                         r = -errno;
    3472           0 :                         goto fail;
    3473             :                 }
    3474             : 
    3475             :         } else {
    3476           0 :                 if (e->watchdog_fd >= 0) {
    3477           0 :                         epoll_ctl(e->epoll_fd, EPOLL_CTL_DEL, e->watchdog_fd, NULL);
    3478           0 :                         e->watchdog_fd = safe_close(e->watchdog_fd);
    3479             :                 }
    3480             :         }
    3481             : 
    3482           0 :         e->watchdog = !!b;
    3483           0 :         return e->watchdog;
    3484             : 
    3485           0 : fail:
    3486           0 :         e->watchdog_fd = safe_close(e->watchdog_fd);
    3487           0 :         return r;
    3488             : }
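
/* A sketch, not part of sd-event.c, of enabling the watchdog logic above from a
 * service: sd_event_set_watchdog() only arms the periodic WATCHDOG=1 keep-alive
 * if the service manager supplied a watchdog interval (sd_watchdog_enabled()
 * returning > 0, i.e. WatchdogSec= is set on the unit); otherwise it returns 0
 * and changes nothing. */

#include <systemd/sd-event.h>

static int enable_watchdog_if_requested(sd_event *e) {
        int r;

        r = sd_event_set_watchdog(e, 1);
        if (r < 0)
                return r;            /* timerfd/epoll setup failed */
        if (r == 0)
                return 0;            /* no watchdog requested by the service manager */

        /* r > 0: the loop now sends WATCHDOG=1 notifications automatically. */
        return 1;
}
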
    3489             : 
    3490           0 : _public_ int sd_event_get_watchdog(sd_event *e) {
    3491           0 :         assert_return(e, -EINVAL);
    3492           0 :         assert_return(e = event_resolve(e), -ENOPKG);
    3493           0 :         assert_return(!event_pid_changed(e), -ECHILD);
    3494             : 
    3495           0 :         return e->watchdog;
    3496             : }
    3497             : 
    3498           0 : _public_ int sd_event_get_iteration(sd_event *e, uint64_t *ret) {
    3499           0 :         assert_return(e, -EINVAL);
    3500           0 :         assert_return(e = event_resolve(e), -ENOPKG);
    3501           0 :         assert_return(!event_pid_changed(e), -ECHILD);
    3502             : 
    3503           0 :         *ret = e->iteration;
    3504           0 :         return 0;
    3505             : }
    3506             : 
    3507           0 : _public_ int sd_event_source_set_destroy_callback(sd_event_source *s, sd_event_destroy_t callback) {
    3508           0 :         assert_return(s, -EINVAL);
    3509             : 
    3510           0 :         s->destroy_callback = callback;
    3511           0 :         return 0;
    3512             : }
    3513             : 
    3514           0 : _public_ int sd_event_source_get_destroy_callback(sd_event_source *s, sd_event_destroy_t *ret) {
    3515           0 :         assert_return(s, -EINVAL);
    3516             : 
    3517           0 :         if (ret)
    3518           0 :                 *ret = s->destroy_callback;
    3519             : 
    3520           0 :         return !!s->destroy_callback;
    3521             : }
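
/* A sketch, not part of sd-event.c, of the destroy-callback facility above: the
 * callback runs with the source's userdata when the source object is freed, so
 * heap-allocated userdata can be released there instead of being tracked
 * separately. my_context is a hypothetical caller-defined type and
 * sd_event_add_defer() is assumed from the public header, not this excerpt. */

#include <errno.h>
#include <stdlib.h>
#include <systemd/sd-event.h>

typedef struct my_context {
        int counter;
} my_context;                        /* hypothetical userdata */

static int defer_handler(sd_event_source *s, void *userdata) {
        my_context *c = userdata;
        c->counter++;
        return 0;
}

static int add_defer_with_owned_context(sd_event *e, sd_event_source **ret) {
        my_context *c;
        int r;

        c = calloc(1, sizeof(my_context));
        if (!c)
                return -ENOMEM;

        r = sd_event_add_defer(e, ret, defer_handler, c);
        if (r < 0) {
                free(c);
                return r;
        }

        /* free() will be invoked with the userdata pointer when the source dies. */
        return sd_event_source_set_destroy_callback(*ret, free);
}
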
    3522             : 
    3523           0 : _public_ int sd_event_source_get_floating(sd_event_source *s) {
    3524           0 :         assert_return(s, -EINVAL);
    3525             : 
    3526           0 :         return s->floating;
    3527             : }
    3528             : 
    3529           0 : _public_ int sd_event_source_set_floating(sd_event_source *s, int b) {
    3530           0 :         assert_return(s, -EINVAL);
    3531             : 
    3532           0 :         if (s->floating == !!b)
    3533           0 :                 return 0;
    3534             : 
    3535           0 :         if (!s->event) /* Already disconnected */
    3536           0 :                 return -ESTALE;
    3537             : 
    3538           0 :         s->floating = b;
    3539             : 
    3540           0 :         if (b) {
    3541           0 :                 sd_event_source_ref(s);
    3542           0 :                 sd_event_unref(s->event);
    3543             :         } else {
    3544           0 :                 sd_event_ref(s->event);
    3545           0 :                 sd_event_source_unref(s);
    3546             :         }
    3547             : 
    3548           0 :         return 1;
    3549             : }
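
/* A sketch, not part of sd-event.c, of the floating-source pattern implemented
 * above: once marked floating, the source is pinned by the event loop rather than
 * by the caller, so the local reference can be dropped and the source stays
 * active until the loop itself goes away. sd_event_add_post() and post_handler
 * are assumed from the public header / caller, not from this excerpt. */

#include <systemd/sd-event.h>

static int post_handler(sd_event_source *s, void *userdata) {
        return 0;                    /* hypothetical no-op handler */
}

static int add_fire_and_forget_post(sd_event *e) {
        sd_event_source *s = NULL;
        int r;

        r = sd_event_add_post(e, &s, post_handler, NULL);
        if (r < 0)
                return r;

        r = sd_event_source_set_floating(s, 1);
        if (r < 0) {
                sd_event_source_unref(s);
                return r;
        }

        /* The loop now keeps the source alive; our reference is no longer needed. */
        sd_event_source_unref(s);
        return 0;
}
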

Generated by: LCOV version 1.14