The FreeRADIUS server  $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
event.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 2 of the License, or
5  * (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15  */
16 
17 /** Wrapper around libkqueue to make managing events easier
18  *
19  * Non-thread-safe event handling specific to FreeRADIUS.
20  *
21  * By non-thread-safe we mean multiple threads can't insert/delete
22  * events concurrently into the same event list without synchronization.
23  *
24  * @file src/lib/util/event.c
25  *
26  * @copyright 2007-2016 The FreeRADIUS server project
27  * @copyright 2016 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
28  * @copyright 2007 Alan DeKok (aland@freeradius.org)
29  */
30 RCSID("$Id: 465a09a359172389677a73e9109955ec4d7865e7 $")
31 
32 #include <freeradius-devel/util/dlist.h>
33 #include <freeradius-devel/util/event.h>
34 #include <freeradius-devel/util/lst.h>
35 #include <freeradius-devel/util/log.h>
36 #include <freeradius-devel/util/rb.h>
37 #include <freeradius-devel/util/strerror.h>
38 #include <freeradius-devel/util/syserror.h>
39 #include <freeradius-devel/util/table.h>
40 #include <freeradius-devel/util/token.h>
41 #include <freeradius-devel/util/atexit.h>
42 
43 #include <sys/stat.h>
44 #include <sys/wait.h>
45 #include <pthread.h>
46 
47 #ifdef NDEBUG
48 /*
49  * Turn off documentation warnings as file/line
50  * args aren't used for non-debug builds.
51  */
53 DIAG_OFF(documentation)
55 #endif
56 
57 #define FR_EV_BATCH_FDS (256)
58 
59 DIAG_OFF(unused-macros)
60 #define fr_time() static_assert(0, "Use el->time for event loop timing")
61 DIAG_ON(unused-macros)
62 
63 #if !defined(SO_GET_FILTER) && defined(SO_ATTACH_FILTER)
64 # define SO_GET_FILTER SO_ATTACH_FILTER
65 #endif
66 
67 #ifdef WITH_EVENT_DEBUG
68 # define EVENT_DEBUG(fmt, ...) printf("EVENT:");printf(fmt, ## __VA_ARGS__);printf("\n");
69 # ifndef EVENT_REPORT_FREQ
70 # define EVENT_REPORT_FREQ 5
71 # endif
72 #else
73 # define EVENT_DEBUG(...)
74 #endif
75 
77 #ifdef EVFILT_AIO
78  { L("EVFILT_AIO"), EVFILT_AIO },
79 #endif
80 #ifdef EVFILT_EXCEPT
81  { L("EVFILT_EXCEPT"), EVFILT_EXCEPT },
82 #endif
83 #ifdef EVFILT_MACHPORT
84  { L("EVFILT_MACHPORT"), EVFILT_MACHPORT },
85 #endif
86  { L("EVFILT_PROC"), EVFILT_PROC },
87  { L("EVFILT_READ"), EVFILT_READ },
88  { L("EVFILT_SIGNAL"), EVFILT_SIGNAL },
89  { L("EVFILT_TIMER"), EVFILT_TIMER },
90  { L("EVFILT_VNODE"), EVFILT_VNODE },
91  { L("EVFILT_WRITE"), EVFILT_WRITE }
92 };
94 
95 #ifdef EVFILT_LIBKQUEUE
96 static int log_conf_kq;
97 #endif
98 
99 /** A timer event
100  *
101  */
103  fr_time_t when; //!< When this timer should fire.
104 
105  fr_event_timer_cb_t callback; //!< Callback to execute when the timer fires.
106  void const *uctx; //!< Context pointer to pass to the callback.
107 
108  TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
109 
110  fr_event_timer_t const **parent; //!< A pointer to the parent structure containing the timer
111  ///< event.
112 
113  fr_lst_index_t lst_id; //!< Where to store opaque lst data.
114  fr_dlist_t entry; //!< List of deferred timer events.
115 
116  fr_event_list_t *el; //!< Event list containing this timer.
117 
118 #ifndef NDEBUG
119  char const *file; //!< Source file this event was last updated in.
120  int line; //!< Line this event was last updated on.
121 #endif
122 };
123 
124 typedef enum {
125  FR_EVENT_FD_SOCKET = 1, //!< is a socket.
126  FR_EVENT_FD_FILE = 2, //!< is a file.
127  FR_EVENT_FD_DIRECTORY = 4, //!< is a directory.
128 
129 #ifdef SO_GET_FILTER
130  FR_EVENT_FD_PCAP = 8,
131 #endif
133 
134 typedef enum {
136 
137  FR_EVENT_FUNC_IDX_FILTER, //!< Sign flip is performed i.e. -1 = 0The filter is used
138  //// as the index in the ev to func index.
139  FR_EVENT_FUNC_IDX_FFLAGS //!< The bit position of the flags in FFLAGS
140  ///< is used to provide the index.
141  ///< i.e. 0x01 -> 0, 0x02 -> 1, 0x08 -> 3 etc..
143 
144 #ifndef SO_GET_FILTER
145 # define FR_EVENT_FD_PCAP 0
146 #endif
147 
148 /** Specifies a mapping between a function pointer in a structure and its respective event
149  *
150  * If the function pointer at the specified offset is set, then a matching event
151  * will be added.
152  *
153  * If the function pointer is NULL, then any existing events will be removed.
154  */
155 typedef struct {
156  size_t offset; //!< Offset of function pointer in structure.
157  char const *name; //!< Name of the event.
158  int16_t filter; //!< Filter to apply.
159  uint16_t flags; //!< Flags to use for inserting event.
160  uint32_t fflags; //!< fflags to pass to filter.
161  int type; //!< Type this filter applies to.
162  bool coalesce; //!< Coalesce this map with the next.
164 
165 typedef struct {
166  fr_event_func_idx_type_t idx_type; //!< What type of index we use for
167  ///< event to function mapping.
168  fr_event_func_map_entry_t *func_to_ev; //!< Function -> Event maps coalesced, out of order.
169  fr_event_func_map_entry_t **ev_to_func; //!< Function -> Event maps in index order.
171 
173  [FR_EVENT_FILTER_IO] = {
175  .func_to_ev = (fr_event_func_map_entry_t[]){
176  {
177  .offset = offsetof(fr_event_io_func_t, read),
178  .name = "read",
179  .filter = EVFILT_READ,
180  .flags = EV_ADD | EV_ENABLE,
181 #ifdef NOTE_NONE
182  .fflags = NOTE_NONE,
183 #else
184  .fflags = 0,
185 #endif
187  },
188  {
189  .offset = offsetof(fr_event_io_func_t, write),
190  .name = "write",
191  .filter = EVFILT_WRITE,
192  .flags = EV_ADD | EV_ENABLE,
193  .fflags = 0,
195  },
196  { 0 }
197  }
198  },
200  .idx_type = FR_EVENT_FUNC_IDX_FFLAGS,
201  .func_to_ev = (fr_event_func_map_entry_t[]){
202  {
203  .offset = offsetof(fr_event_vnode_func_t, delete),
204  .name = "delete",
205  .filter = EVFILT_VNODE,
206  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
207  .fflags = NOTE_DELETE,
209  .coalesce = true
210  },
211  {
212  .offset = offsetof(fr_event_vnode_func_t, write),
213  .name = "write",
214  .filter = EVFILT_VNODE,
215  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
216  .fflags = NOTE_WRITE,
217  .type = FR_EVENT_FD_FILE,
218  .coalesce = true
219  },
220  {
221  .offset = offsetof(fr_event_vnode_func_t, extend),
222  .name = "extend",
223  .filter = EVFILT_VNODE,
224  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
225  .fflags = NOTE_EXTEND,
227  .coalesce = true
228  },
229  {
230  .offset = offsetof(fr_event_vnode_func_t, attrib),
231  .name = "attrib",
232  .filter = EVFILT_VNODE,
233  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
234  .fflags = NOTE_ATTRIB,
235  .type = FR_EVENT_FD_FILE,
236  .coalesce = true
237  },
238  {
239  .offset = offsetof(fr_event_vnode_func_t, link),
240  .name = "link",
241  .filter = EVFILT_VNODE,
242  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
243  .fflags = NOTE_LINK,
244  .type = FR_EVENT_FD_FILE,
245  .coalesce = true
246  },
247  {
248  .offset = offsetof(fr_event_vnode_func_t, rename),
249  .name = "rename",
250  .filter = EVFILT_VNODE,
251  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
252  .fflags = NOTE_RENAME,
253  .type = FR_EVENT_FD_FILE,
254  .coalesce = true
255  },
256 #ifdef NOTE_REVOKE
257  {
258  .offset = offsetof(fr_event_vnode_func_t, revoke),
259  .name = "revoke",
260  .filter = EVFILT_VNODE,
261  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
262  .fflags = NOTE_REVOKE,
263  .type = FR_EVENT_FD_FILE,
264  .coalesce = true
265  },
266 #endif
267 #ifdef NOTE_FUNLOCK
268  {
269  .offset = offsetof(fr_event_vnode_func_t, funlock),
270  .name = "funlock",
271  .filter = EVFILT_VNODE,
272  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
273  .fflags = NOTE_FUNLOCK,
274  .type = FR_EVENT_FD_FILE,
275  .coalesce = true
276  },
277 #endif
278  { 0 }
279  }
280  }
281 };
282 
284  { L("directory"), FR_EVENT_FD_DIRECTORY },
285  { L("file"), FR_EVENT_FD_FILE },
286  { L("pcap"), FR_EVENT_FD_PCAP },
287  { L("socket"), FR_EVENT_FD_SOCKET }
288 };
290 
291 /** A file descriptor/filter event
292  *
293  */
294 struct fr_event_fd {
295  fr_rb_node_t node; //!< Entry in the tree of file descriptor handles.
296  ///< this should really go away and we should pass around
297  ///< handles directly.
298 
299  fr_event_list_t *el; //!< Event list this event belongs to.
301  int fd; //!< File descriptor we're listening for events on.
302 
303  fr_event_fd_type_t type; //!< Type of events we're interested in.
304 
305  int sock_type; //!< The type of socket SOCK_STREAM, SOCK_RAW etc...
306 
307  fr_event_funcs_t active; //!< Active filter functions.
308  fr_event_funcs_t stored; //!< Stored (set, but inactive) filter functions.
309 
310  fr_event_error_cb_t error; //!< Callback for when an error occurs on the FD.
311 
312  fr_event_func_map_t const *map; //!< Function map between #fr_event_funcs_t and kevent filters.
313 
314  bool is_registered; //!< Whether this fr_event_fd_t's FD has been registered with
315  ///< kevent. Mostly for debugging.
316 
317  void *uctx; //!< Context pointer to pass to each file descriptor callback.
318  TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
319 
320  fr_dlist_t entry; //!< Entry in free list.
321 
322 #ifndef NDEBUG
323  uintptr_t armour; //!< protection flag from being deleted.
324 #endif
325 
326 #ifndef NDEBUG
327  char const *file; //!< Source file this event was last updated in.
328  int line; //!< Line this event was last updated on.
329 #endif
330 };
331 
332 struct fr_event_pid {
333  fr_event_list_t *el; //!< Event list this event belongs to.
334 
335  bool is_registered; //!< Whether this user event has been registered
336  ///< with the event loop.
337 
338  pid_t pid; //!< child to wait for
340 
341  fr_event_pid_cb_t callback; //!< callback to run when the child exits
342  void *uctx; //!< Context pointer to pass to each file descriptor callback.
343 
344  /** Fields that are only used if we're being triggered by a user event
345  */
346  struct {
347  fr_event_user_t *ev; //!< Fallback user event we use to raise a PID event when
348  ///< a race occurs with kevent.
349  int status; //!< Status we got from waitid.
351 #ifndef NDEBUG
352  char const *file; //!< Source file this event was last updated in.
353  int line; //!< Line this event was last updated on.
354 #endif
355 };
356 
357 /** Hold additional information for automatically reaped PIDs
358  */
359 typedef struct {
360  fr_event_list_t *el; //!< Event list this event belongs to.
361  fr_event_pid_t const *pid_ev; //!< pid_ev this reaper is bound to.
362 
363  fr_dlist_t entry; //!< If the fr_event_pid is in the detached, reap state,
364  ///< it's inserted into a list associated with the event.
365  //!< We then send SIGKILL, and forcefully reap the process
366  ///< on exit.
367 
368  fr_event_pid_cb_t callback; //!< callback to run when the child exits
369  void *uctx; //!< Context pointer to pass to each file descriptor callback.
371 
372 /** Callbacks for kevent() user events
373  *
374  */
376  fr_event_list_t *el; //!< Event list this event belongs to.
377 
378  bool is_registered; //!< Whether this user event has been registered
379  ///< with the event loop.
380 
381  fr_event_user_cb_t callback; //!< The callback to call.
382  void *uctx; //!< Context for the callback.
383 
384 #ifndef NDEBUG
385  char const *file; //!< Source file this event was last updated in.
386  int line; //!< Line this event was last updated on.
387 #endif
388 };
389 
390 /** Callbacks to perform when the event handler is about to check the events
391  *
392  */
393 typedef struct {
394  fr_dlist_t entry; //!< Linked list of callback.
395  fr_event_status_cb_t callback; //!< The callback to call.
396  void *uctx; //!< Context for the callback.
398 
399 /** Callbacks to perform after all timers and FDs have been checked
400  *
401  */
402 typedef struct {
403  fr_dlist_t entry; //!< Linked list of callback.
404  fr_event_timer_cb_t callback; //!< The callback to call.
405  void *uctx; //!< Context for the callback.
407 
408 /** Stores all information relating to an event list
409  *
410  */
412  fr_lst_t *times; //!< of timer events to be executed.
413  fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue.
414 
415  int will_exit; //!< Will exit on next call to fr_event_corral.
416  int exit; //!< If non-zero event loop will prevent the addition
417  ///< of new events, and will return immediately
418  ///< from the corral/service function.
419 
420  fr_event_time_source_t time; //!< Where our time comes from.
421  fr_time_t now; //!< The last time the event list was serviced.
422  bool dispatch; //!< Whether the event list is currently dispatching events.
423 
424  int num_fd_events; //!< Number of events in this event list.
425 
426  int kq; //!< instance associated with this event list.
427 
428  fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle...
429  fr_dlist_head_t post_callbacks; //!< post-processing callbacks
430 
431  fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're
432  ///< waiting to reap.
433 
434  struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */
435 
436  bool in_handler; //!< Deletes should be deferred until after the
437  ///< handlers complete.
438 
439  fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion.
440  fr_dlist_head_t ev_to_add; //!< dlist of events to add
441 
442 #ifdef WITH_EVENT_DEBUG
443  fr_event_timer_t const *report; //!< Report event.
444 #endif
445 };
446 
448 {
449  switch (map->idx_type) {
450  default:
451  return;
452 
453  /*
454  * - Figure out the lowest filter value
455  * - Invert it
456  * - Allocate an array
457  * - Populate the array
458  */
460  {
461  int low = 0;
463 
464  for (entry = map->func_to_ev; entry->name; entry++) if (entry->filter < low) low = entry->filter;
465 
466  map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, ~low + 1);
467  if (unlikely(!map->ev_to_func)) abort();
468 
469  for (entry = map->func_to_ev; entry->name; entry++) map->ev_to_func[~entry->filter] = entry;
470  }
471  break;
472 
473  /*
474  * - Figure out the highest bit position
475  * - Allocate an array
476  * - Populate the array
477  */
479  {
480  uint8_t high = 0, pos;
482 
483  for (entry = map->func_to_ev; entry->name; entry++) {
484  pos = fr_high_bit_pos(entry->fflags);
485  if (pos > high) high = pos;
486  }
487 
488  map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, high);
489  if (unlikely(!map->ev_to_func)) abort();
490 
491  for (entry = map->func_to_ev; entry->name; entry++) {
492  typeof_field(fr_event_func_map_entry_t, fflags) fflags = entry->fflags;
493 
494  /*
495  * Multiple notes can be associated
496  * with the same function.
497  */
498  while ((pos = fr_high_bit_pos(fflags))) {
499  pos -= 1;
500  map->ev_to_func[pos] = entry;
501  /*
502  * Coverity thinks that after this decrement, pos
503  * can be 255 even though the loop condition precludes
504  * it. Adding a Coverity-only check won't change that,
505  * so we're stuck with annotation.
506  */
507  /* coverity [overflow_const] */
508  fflags &= ~(1 << pos);
509  }
510  }
511  }
512  break;
513  }
514 }
515 
516 /** Figure out which function to call given a kevent
517  *
518  * This function should be called in a loop until it returns NULL.
519  *
520  * @param[in] ef File descriptor state handle.
521  * @param[in] filter from the kevent.
522  * @param[in,out] fflags from the kevent. Each call will return the function
523  * from the next most significant NOTE_*, with each
524  * NOTE_* before unset from fflags.
525  * @return
526  * - NULL there are no more callbacks to call.
527  * - The next callback to call.
528  */
529 static inline CC_HINT(always_inline) fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
530 {
531  fr_event_func_map_t const *map = ef->map;
532 
533 #define GET_FUNC(_ef, _offset) *((fr_event_fd_cb_t const *)((uint8_t const *)&(_ef)->active + _offset))
534 
535  switch (map->idx_type) {
536  default:
537  fr_assert_fail("Invalid index type %i", map->idx_type);
538  return NULL;
539 
541  {
542  int idx;
543 
544  if (!*filter) return NULL;
545 
546  idx = ~*filter; /* Consume the filter */
547  *filter = 0;
548 
549  return GET_FUNC(ef, map->ev_to_func[idx]->offset);
550  }
551 
553  {
554  int our_fflags = *fflags;
555  uint8_t pos = fr_high_bit_pos(our_fflags);
556 
557  if (!pos) return NULL; /* No more fflags to consume */
558  pos -= 1; /* Saves an array element */
559 
560  *fflags = our_fflags & ~(1 << pos); /* Consume the knote */
561 
562  return GET_FUNC(ef, map->ev_to_func[pos]->offset);
563  }
564  }
565 }
566 
567 /** Compare two timer events to see which one should occur first
568  *
569  * @param[in] a the first timer event.
570  * @param[in] b the second timer event.
571  * @return
572  * - +1 if a should occur later than b.
573  * - -1 if a should occur earlier than b.
574  * - 0 if both events occur at the same time.
575  */
576 static int8_t fr_event_timer_cmp(void const *a, void const *b)
577 {
578  fr_event_timer_t const *ev_a = a, *ev_b = b;
579 
580  return fr_time_cmp(ev_a->when, ev_b->when);
581 }
582 
583 /** Compare two file descriptor handles
584  *
585  * @param[in] one the first file descriptor handle.
586  * @param[in] two the second file descriptor handle.
587  * @return CMP(one, two)
588  */
589 static int8_t fr_event_fd_cmp(void const *one, void const *two)
590 {
591  fr_event_fd_t const *a = one, *b = two;
592 
593  CMP_RETURN(a, b, fd);
594 
595  return CMP(a->filter, b->filter);
596 }
597 
598 /** Return the number of file descriptors is_registered with this event loop
599  *
600  */
602 {
603  if (unlikely(!el)) return -1;
604 
605  return fr_rb_num_elements(el->fds);
606 }
607 
608 /** Return the number of timer events currently scheduled
609  *
610  * @param[in] el to return timer events for.
611  * @return number of timer events.
612  */
614 {
615  if (unlikely(!el)) return -1;
616 
617  return fr_lst_num_elements(el->times);
618 }
619 
620 /** Return the kq associated with an event list.
621  *
622  * @param[in] el to return timer events for.
623  * @return kq
624  */
626 {
627  if (unlikely(!el)) return -1;
628 
629  return el->kq;
630 }
631 
632 /** Get the current server time according to the event list
633  *
634  * If the event list is currently dispatching events, we return the time
635  * this iteration of the event list started.
636  *
637  * If the event list is not currently dispatching events, we return the
638  * current system time.
639  *
640  * @param[in] el to get time from.
641  * @return the current time according to the event list.
642  */
644 {
645  if (el->dispatch) {
646  return el->now;
647  } else {
648  return el->time();
649  }
650 }
651 
652 /** Placeholder callback to avoid branches in service loop
653  *
654  * This is set in place of any NULL function pointers, so that the event loop doesn't
655  * SEGV if a filter callback function is unset between corral and service.
656  */
657 static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
658 {
659  return;
660 }
661 
662 /** Build a new evset based on function pointers present
663  *
664  * @note The contents of active functions may be inconsistent if this function errors. But the
665  * only time that will occur is if the caller passed invalid arguments.
666  *
667  * @param[in] el we're building events for.
668  * @param[out] out_kev where to write the evset.
669  * @param[in] outlen length of output buffer.
670  * @param[out] active The set of function pointers with active filters.
671  * @param[in] ef event to insert.
672  * @param[in] new Functions to map to filters.
673  * @param[in] prev Previous set of functions mapped to filters.
674  * @return
675  * - >= 0 the number of changes written to out.
676  * - < 0 an error occurred.
677  */
679 #ifndef WITH_EVENT_DEBUG
680  UNUSED
681 #endif
683  struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active,
684  fr_event_fd_t *ef,
685  fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
686 {
687  struct kevent *out = out_kev, *end = out + outlen;
688  fr_event_func_map_entry_t const *map;
689  struct kevent add[10], *add_p = add;
690  size_t i;
691 
692  EVENT_DEBUG("%p - Building new evset for FD %i (new %p, prev %p)", el, ef->fd, new, prev);
693 
694  /*
695  * Iterate over the function map, setting/unsetting
696  * filters and filter flags.
697  */
698  for (map = ef->map->func_to_ev; map->name; map++) {
699  bool has_current_func = false;
700  bool has_prev_func = false;
701  uint32_t current_fflags = 0;
702  uint32_t prev_fflags = 0;
703 
704  do {
705  fr_event_fd_cb_t prev_func;
706  fr_event_fd_cb_t new_func;
707 
708  /*
709  * If the previous value was the 'noop'
710  * callback, it's identical to being unset.
711  */
712  prev_func = *(fr_event_fd_cb_t const *)((uint8_t const *)prev + map->offset);
713  if (prev_func && (prev_func != fr_event_fd_noop)) {
714  EVENT_DEBUG("\t%s prev set (%p)", map->name, prev_func);
715  prev_fflags |= map->fflags;
716  has_prev_func = true;
717  } else {
718  EVENT_DEBUG("\t%s prev unset", map->name);
719  }
720 
721  new_func = *(fr_event_fd_cb_t const *)((uint8_t const *)new + map->offset);
722  if (new_func && (new_func != fr_event_fd_noop)) {
723  EVENT_DEBUG("\t%s curr set (%p)", map->name, new_func);
724  current_fflags |= map->fflags;
725  has_current_func = true;
726 
727  /*
728  * Check the filter will work for the
729  * type of file descriptor specified.
730  */
731  if (!(map->type & ef->type)) {
732  fr_strerror_printf("kevent %s (%s), can't be applied to fd of type %s",
733  map->name,
736  map->type, "<INVALID>"));
737  return -1;
738  }
739 
740  /*
741  * Mark this filter function as active
742  */
743  memcpy((uint8_t *)active + map->offset, (uint8_t const *)new + map->offset,
744  sizeof(fr_event_fd_cb_t));
745  } else {
746  EVENT_DEBUG("\t%s curr unset", map->name);
747 
748  /*
749  * Mark this filter function as inactive
750  * by setting it to the 'noop' callback.
751  */
752  *((fr_event_fd_cb_t *)((uint8_t *)active + map->offset)) = fr_event_fd_noop;
753  }
754 
755  if (!(map + 1)->coalesce) break;
756  map++;
757  } while (1);
758 
759  if (out > end) {
760  fr_strerror_const("Out of memory to store kevent filters");
761  return -1;
762  }
763 
764  /*
765  * Upsert if we add a function or change the flags.
766  */
767  if (has_current_func &&
768  (!has_prev_func || (current_fflags != prev_fflags))) {
769  if ((size_t)(add_p - add) >= (NUM_ELEMENTS(add))) {
770  fr_strerror_const("Out of memory to store kevent EV_ADD filters");
771  return -1;
772  }
773  EVENT_DEBUG("\tEV_SET EV_ADD filter %s (%i), flags %i, fflags %i",
775  map->filter, map->flags, current_fflags);
776  EV_SET(add_p++, ef->fd, map->filter, map->flags, current_fflags, 0, ef);
777 
778  /*
779  * Delete if we remove a function.
780  */
781  } else if (!has_current_func && has_prev_func) {
782  EVENT_DEBUG("\tEV_SET EV_DELETE filter %s (%i), flags %i, fflags %i",
784  map->filter, EV_DELETE, 0);
785  EV_SET(out++, ef->fd, map->filter, EV_DELETE, 0, 0, ef);
786  }
787  }
788 
789  /*
790  * kevent is fine with adds/deletes in the same operation
791  * on the same file descriptor, but libkqueue doesn't do
792  * any kind of coalescing or ordering so you get an EEXIST
793  * error.
794  */
795  for (i = 0; i < (size_t)(add_p - add); i++) memcpy(out++, &add[i], sizeof(*out));
796 
797  return out - out_kev;
798 }
799 
800 /** Discover the type of a file descriptor
801  *
802  * This function writes the result of the discovery to the ef->type,
803  * and ef->sock_type fields.
804  *
805  * @param[out] ef to write type data to.
806  * @param[in] fd to discover the type of.
807  * @return
808  * - 0 on success.
809  * - -1 on failure.
810  */
811 static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
812 {
813  socklen_t opt_len = sizeof(ef->sock_type);
814 
815  /*
816  * It's a socket or PCAP socket
817  */
818  if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &ef->sock_type, &opt_len) == 0) {
819 #ifdef SO_GET_FILTER
820  opt_len = 0;
821  if (unlikely(getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, NULL, &opt_len) < 0)) {
822  fr_strerror_printf("Failed determining PF status: %s", fr_syserror(errno));
823  return -1;
824  }
825  if (opt_len) {
826  ef->type = FR_EVENT_FD_PCAP;
827  } else
828 #endif
829  {
830  ef->type = FR_EVENT_FD_SOCKET;
831  }
832 
833  /*
834  * It's a file or directory
835  */
836  } else {
837  struct stat buf;
838 
839  if (errno != ENOTSOCK) {
840  fr_strerror_printf("Failed retrieving socket type: %s", fr_syserror(errno));
841  return -1;
842  }
843 
844  if (fstat(fd, &buf) < 0) {
845  fr_strerror_printf("Failed calling stat() on file: %s", fr_syserror(errno));
846  return -1;
847  }
848 
849  if (S_ISDIR(buf.st_mode)) {
851  } else {
852  ef->type = FR_EVENT_FD_FILE;
853  }
854  }
855  ef->fd = fd;
856 
857  return 0;
858 }
859 
860 /** Remove a file descriptor from the event loop and rbtree but don't explicitly free it
861  *
862  *
863  * @param[in] ef to remove.
864  * @return
865  * - 0 on success.
866  * - -1 on error;
867  */
869 {
870  struct kevent evset[10];
871  int count = 0;
872  fr_event_list_t *el = ef->el;
873  fr_event_funcs_t funcs;
874 
875  /*
876  * Already been removed from the various trees and
877  * the event loop.
878  */
879  if (ef->is_registered) {
880  memset(&funcs, 0, sizeof(funcs));
881 
882  fr_assert(ef->armour == 0);
883 
884  /*
885  * If this fails, it's a pretty catastrophic error.
886  */
887  count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
888  &ef->active, ef, &funcs, &ef->active);
889  if (count > 0) {
890  int ret;
891 
892  /*
893  * If this fails, assert on debug builds.
894  */
895  ret = kevent(el->kq, evset, count, NULL, 0, NULL);
896  if (!fr_cond_assert_msg(ret >= 0,
897  "FD %i was closed without being removed from the KQ: %s",
898  ef->fd, fr_syserror(errno))) {
899  return -1; /* Prevent the free, and leave the fd in the trees */
900  }
901  }
902 
903  fr_rb_delete(el->fds, ef);
904  ef->is_registered = false;
905  }
906 
907  /*
908  * Insert into the deferred free list, event will be
909  * freed later.
910  */
911  if (el->in_handler) {
912  /*
913  * Don't allow the same event to be
914  * inserted into the free list multiple
915  * times.
916  *
917  * This can happen if the same ef is
918  * delivered by multiple filters, i.e.
919  * if EVFILT_READ and EVFILT_WRITE
920  * were both high, and both handlers
921  * attempted to delete the event
922  * we'd need to prevent the event being
923  * inserted into the free list multiple
924  * times.
925  */
927  return -1; /* Will be freed later */
928  } else if (fr_dlist_entry_in_list(&ef->entry)) {
930  }
931 
932  return 0;
933 }
934 
935 /** Move a file descriptor event from one event list to another
936  *
937  * FIXME - Move suspended events too.
938  *
939  * @note Any pending events will not be transferred.
940  *
941  * @param[in] dst Event list to move file descriptor event to.
942  * @param[in] src Event list to move file descriptor from.
943  * @param[in] fd of the event to move.
944  * @param[in] filter of the event to move.
945  * @return
946  * - 0 on success.
947  * - -1 on failure. The event will remain active in the src list.
948  */
950  fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
951 {
952  fr_event_fd_t *ef;
953  int ret;
954 
955  if (fr_event_loop_exiting(dst)) {
956  fr_strerror_const("Destination event loop exiting");
957  return -1;
958  }
959 
960  /*
961  * Ensure this exists
962  */
963  ef = fr_rb_find(src->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
964  if (unlikely(!ef)) {
965  fr_strerror_printf("No events are registered for fd %i", fd);
966  return -1;
967  }
968 
970  ef->linked_ctx, NULL,
971  dst, ef->fd, ef->filter, &ef->active, ef->error, ef->uctx);
972  if (ret < 0) return -1;
973 
974  (void)fr_event_fd_delete(src, ef->fd, ef->filter);
975 
976  return ret;
977 }
978 
979 
980 /** Suspend/resume a subset of filters
981  *
982  * This function trades producing useful errors for speed.
983  *
984  * An example of suspending the read filter for an FD would be:
985  @code {.c}
986  static fr_event_update_t pause_read[] = {
987  FR_EVENT_SUSPEND(fr_event_io_func_t, read),
988  { 0 }
989  }
990 
991  fr_event_filter_update(el, fd, FR_EVENT_FILTER_IO, pause_read);
992  @endcode
993  *
994  * @param[in] el to update descriptor in.
995  * @param[in] fd to update filters for.
996  * @param[in] filter The type of filter to update.
997  * @param[in] updates An array of updates to toggle filters on/off without removing
998  * the callback function.
999  */
1001  fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
1002 {
1003  fr_event_fd_t *ef;
1004  size_t i;
1005  fr_event_funcs_t curr_active, curr_stored;
1006  struct kevent evset[10];
1007  int count = 0;
1008 
1009  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1010  if (unlikely(!ef)) {
1011  fr_strerror_printf("No events are registered for fd %i", fd);
1012  return -1;
1013  }
1014 
1015 #ifndef NDEBUG
1016  ef->file = file;
1017  ef->line = line;
1018 #endif
1019 
1020  /*
1021  * Cheapest way of ensuring this function can error without
1022  * leaving everything in an inconsistent state.
1023  */
1024  memcpy(&curr_active, &ef->active, sizeof(curr_active));
1025  memcpy(&curr_stored, &ef->stored, sizeof(curr_stored));
1026 
1027  /*
1028  * Apply modifications to our copies of the active/stored array.
1029  */
1030  for (i = 0; updates[i].op; i++) {
1031  switch (updates[i].op) {
1032  default:
1033  case FR_EVENT_OP_SUSPEND:
1034  fr_assert(ef->armour == 0); /* can't suspect protected FDs */
1035  memcpy((uint8_t *)&ef->stored + updates[i].offset,
1036  (uint8_t *)&ef->active + updates[i].offset, sizeof(fr_event_fd_cb_t));
1037  memset((uint8_t *)&ef->active + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
1038  break;
1039 
1040  case FR_EVENT_OP_RESUME:
1041  memcpy((uint8_t *)&ef->active + updates[i].offset,
1042  (uint8_t *)&ef->stored + updates[i].offset, sizeof(fr_event_fd_cb_t));
1043  memset((uint8_t *)&ef->stored + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
1044  break;
1045  }
1046  }
1047 
1048  count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset), &ef->active,
1049  ef, &ef->active, &curr_active);
1050  if (unlikely(count < 0)) {
1051  error:
1052  memcpy(&ef->active, &curr_active, sizeof(curr_active));
1053  memcpy(&ef->stored, &curr_stored, sizeof(curr_stored));
1054  return -1;
1055  }
1056 
1057  if (count && unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0)) {
1058  fr_strerror_printf("Failed updating filters for FD %i: %s", ef->fd, fr_syserror(errno));
1059  goto error;
1060  }
1061 
1062  return 0;
1063 }
1064 
1065 /** Insert a filter for the specified fd
1066  *
1067  * @param[in] ctx to bind lifetime of the event to.
1068  * @param[out] ef_out Previously allocated ef, or NULL.
1069  * @param[in] el to insert fd callback into.
1070  * @param[in] fd to install filters for.
1071  * @param[in] filter one of the #fr_event_filter_t values.
1072  * @param[in] funcs Structure containing callback functions. If a function pointer
1073  * is set, the equivalent kevent filter will be installed.
1074  * @param[in] error function to call when an error occurs on the fd.
1075  * @param[in] uctx to pass to handler.
1076  * @return
1076  * - 0 on success.
1076  * - -1 on failure (bad arguments, OOM, unsupported filter, or kevent error).
1076  */
/*
 * NOTE(review): the function's signature line is missing from this extraction
 * (doc line 1077 was a doxygen anchor and got dropped).  The use of `file' and
 * `line' under #ifndef NDEBUG below suggests this is the NDEBUG_LOCATION_ARGS
 * variant of the filter-insert entry point -- confirm against upstream event.c.
 */
1078  TALLOC_CTX *ctx, fr_event_fd_t **ef_out,
1079  fr_event_list_t *el, int fd,
1080  fr_event_filter_t filter,
1081  void *funcs, fr_event_error_cb_t error,
1082  void *uctx)
1083 {
1084  ssize_t count;
1085  fr_event_fd_t *ef;
1086  fr_event_funcs_t active;
1087  struct kevent evset[10];
1088 
1089  if (unlikely(!el)) {
1090  fr_strerror_const("Invalid argument: NULL event list");
1091  return -1;
1092  }
1093 
1094  if (unlikely(fd < 0)) {
1095  fr_strerror_printf("Invalid arguments: Bad FD %i", fd);
1096  return -1;
1097  }
1098 
1099  if (unlikely(el->exit)) {
1100  fr_strerror_const("Event loop exiting");
1101  return -1;
1102  }
1103 
1104  /*
1104  * No caller-provided handle: look for an existing registration
1104  * for this fd/filter pair in the event list's rbtree.
1104  */
1104  if (!ef_out || !*ef_out) {
1105  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1106  } else {
1107  ef = *ef_out;
1108  fr_assert((fd < 0) || (ef->fd == fd));
1109  }
1110 
1111  /*
1112  * Need to free the event to change the talloc link.
1113  *
1114  * This is generally bad. If you hit this
1115  * code path you probably screwed up somewhere.
1116  */
1117  if (unlikely(ef && (ef->linked_ctx != ctx))) TALLOC_FREE(ef);
1118 
1119  /*
1120  * No pre-existing event. Allocate an entry
1121  * for insertion into the rbtree.
1122  */
1123  if (!ef) {
1124  ef = talloc_zero(el, fr_event_fd_t);
1125  if (unlikely(!ef)) {
1126  fr_strerror_const("Out of memory");
1127  return -1;
1128  }
1129  talloc_set_destructor(ef, _event_fd_delete);
1130 
1131  /*
1132  * Bind the lifetime of the event to the specified
1133  * talloc ctx. If the talloc ctx is freed, the
1134  * event will also be freed.
1135  */
1136  if (ctx != el) talloc_link_ctx(ctx, ef);
1137  ef->linked_ctx = ctx;
1138  ef->el = el;
1139 
1140  /*
1141  * Determine what type of file descriptor
1142  * this is.
1143  */
1144  if (fr_event_fd_type_set(ef, fd) < 0) {
1145  free:
1146  talloc_free(ef);
1147  return -1;
1148  }
1149 
1150  /*
1151  * Check the filter value is valid
1152  */
1153  if ((filter > (NUM_ELEMENTS(filter_maps) - 1))) {
1154  not_supported:
1155  fr_strerror_printf("Filter %i not supported", filter);
1156  goto free;
1157  }
1158  ef->map = &filter_maps[filter];
1159  if (ef->map->idx_type == FR_EVENT_FUNC_IDX_NONE) goto not_supported;
1160 
1161  count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
1162  &ef->active, ef, funcs, &ef->active);
1163  if (count < 0) goto free;
1164  if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
1165  fr_strerror_printf("Failed inserting filters for FD %i: %s", fd, fr_syserror(errno));
1166  goto free;
1167  }
1168 
1169  ef->filter = filter;
1170  fr_rb_insert(el->fds, ef);
1171  ef->is_registered = true;
1172 
1173  /*
1174  * Pre-existing event, update the filters and
1175  * functions associated with the file descriptor.
1176  */
1177  } else {
1178  fr_assert(ef->is_registered == true);
1179 
1180  /*
1181  * Take a copy of the current set of active
1182  * functions, so we can error out in a
1183  * consistent state.
1184  */
1185  memcpy(&active, &ef->active, sizeof(ef->active));
1186 
1187  fr_assert((ef->armour == 0) || ef->active.io.read);
1188 
1189  count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
1190  &ef->active, ef, funcs, &ef->active);
1191  if (count < 0) {
1192  error:
1193  memcpy(&ef->active, &active, sizeof(ef->active));
1194  return -1;
1195  }
1196  if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
1197  fr_strerror_printf("Failed modifying filters for FD %i: %s", fd, fr_syserror(errno));
1198  goto error;
1199  }
1200 
1201  /*
1202  * Clear any previously suspended functions
1203  */
1204  memset(&ef->stored, 0, sizeof(ef->stored));
1205  }
1206 
1206  /* Record the allocation site for debugging leaked/stale events. */
1207 #ifndef NDEBUG
1208  ef->file = file;
1209  ef->line = line;
1210 #endif
1211  ef->error = error;
1212  ef->uctx = uctx;
1213 
1214  if (ef_out) *ef_out = ef;
1215 
1216  return 0;
1217 }
1218 
1219 /** Associate I/O callbacks with a file descriptor
1220  *
1221  * @param[in] ctx to bind lifetime of the event to.
1222  * @param[out] ef_out Where to store the output event
1223  * @param[in] el to insert fd callback into.
1224  * @param[in] fd to install filters for.
1225  * @param[in] read_fn function to call when fd is readable.
1226  * @param[in] write_fn function to call when fd is writable.
1227  * @param[in] error function to call when an error occurs on the fd.
1228  * @param[in] uctx to pass to handler.
1229  * @return
1230  * - 0 on success.
1231  * - -1 on failure.
1232  */
/*
 * NOTE(review): the signature line (doc line 1233) is missing from this
 * extraction -- it was a doxygen anchor.  Confirm against upstream event.c.
 */
1234  TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd,
1235  fr_event_fd_cb_t read_fn,
1236  fr_event_fd_cb_t write_fn,
1237  fr_event_error_cb_t error,
1238  void *uctx)
1239 {
1240  fr_event_io_func_t funcs = { .read = read_fn, .write = write_fn };
1241 
1242  /* At least one of the I/O callbacks must be provided. */
1242  if (unlikely(!read_fn && !write_fn)) {
1243  fr_strerror_const("Invalid arguments: All callbacks are NULL");
1244  return -1;
1245  }
1246 
1246  /*
1246  * NOTE(review): the line invoking the underlying filter-insert function
1246  * (doc line 1247) is missing here; only its argument list survives.
1246  */
1248  ctx, ef_out, el, fd, FR_EVENT_FILTER_IO, &funcs, error, uctx);
1249 }
1250 
1251 /** Remove a file descriptor from the event loop
1252  *
1253  * @param[in] el to remove file descriptor from.
1254  * @param[in] fd to remove.
1255  * @param[in] filter The type of filter to remove.
1256  * @return
1257  * - 0 if file descriptor was removed.
1258  * - <0 on error.
1259  */
/*
 * NOTE(review): the signature line (doc line 1260) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
1261 {
1262  fr_event_fd_t *ef;
1263 
1264  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1265  if (unlikely(!ef)) {
1266  fr_strerror_printf("No events are registered for fd %i, filter %d", fd, filter);
1267  return -1;
1268  }
1269 
1270  /*
1271  * Free will normally fail if it's
1272  * a deferred free. There is a special
1273  * case for kevent failures though.
1274  *
1275  * We distinguish between the two by
1276  * looking to see if the ef is still
1277  * in the event tree.
1278  *
1279  * Talloc returning -1 guarantees the
1280  * memory has not been freed.
1281  */
1282  if ((talloc_free(ef) == -1) && ef->is_registered) return -1;
1283 
1284  return 0;
1285 }
1286 
1287 /** Get the opaque event handle from a file descriptor
1288  *
1289  * @param[in] el to search for fd/filter in.
1290  * @param[in] fd to search for.
1291  * @param[in] filter to search for.
1292  * @return
1293  * - NULL if no event could be found.
1294  * - The opaque handle representing an fd event.
1295  */
/*
 * NOTE(review): the signature line (doc line 1296) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
1297 {
1298  fr_event_fd_t *ef;
1299 
1300  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1301  if (unlikely(!ef)) {
1302  fr_strerror_printf("No events are registered for fd %i", fd);
1303  return NULL;
1304  }
1305 
1306  return ef;
1307 }
1308 
1309 /** Returns the appropriate callback function for a given event
1310  *
1311  * @param[in] ef the event filter fd handle.
1312  * @param[in] kq_filter If the callbacks are indexed by filter.
1313  * @param[in] kq_fflags If the callbacks are indexed by NOTES (fflags).
1314  * @return
1315  * - NULL if no event it associated with the given ef/kq_filter or kq_fflags combo.
1316  * - The callback that would be called if an event with this filter/fflag combo was received.
1317  */
1318 fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
1319 {
1320  return event_fd_func(ef, &kq_filter, &kq_fflags);
1321 }
1322 
1323 /** Returns the uctx associated with an fr_event_fd_t handle
1324  *
1325  */
/*
 * NOTE(review): the signature line (doc line 1326) is missing from this
 * extraction.  The body returns ef->uctx, so this is presumably the
 * fd-handle uctx accessor -- confirm against upstream event.c.
 */
1327 {
1328  return ef->uctx;
1329 }
1330 
1331 #ifndef NDEBUG
1332 /** Armour an FD
1333  *
1334  * @param[in] el to remove file descriptor from.
1335  * @param[in] fd to remove.
1336  * @param[in] filter The type of filter to remove.
1337  * @param[in] armour The armour to add.
1338  * @return
1339  * - 0 if file descriptor was armoured
1340  * - <0 on error.
1341  */
1342 int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1343 {
1344  fr_event_fd_t *ef;
1345 
1346  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1347  if (unlikely(!ef)) {
1348  fr_strerror_printf("No events are registered for fd %i", fd);
1349  return -1;
1350  }
1351 
1352  if (ef->armour != 0) {
1353  fr_strerror_printf("FD %i is already armoured", fd);
1354  return -1;
1355  }
1356 
1357  ef->armour = armour;
1358 
1359  return 0;
1360 }
1361 
1362 /** Unarmour an FD
1363  *
1364  * @param[in] el to remove file descriptor from.
1365  * @param[in] fd to remove.
1366  * @param[in] filter The type of filter to remove.
1367  * @param[in] armour The armour to remove
1368  * @return
1369  * - 0 if file descriptor was unarmoured
1370  * - <0 on error.
1371  */
1372 int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1373 {
1374  fr_event_fd_t *ef;
1375 
1376  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1377  if (unlikely(!ef)) {
1378  fr_strerror_printf("No events are registered for fd %i", fd);
1379  return -1;
1380  }
1381 
1382  fr_assert(ef->armour == armour);
1383 
1384  ef->armour = 0;
1385  return 0;
1386 }
1387 #endif
1388 
1389 /** Remove an event from the event loop
1390  *
1391  * @param[in] ev to free.
1392  * @return
1393  * - 0 on success.
1394  * - -1 on failure.
1395  */
/*
 * NOTE(review): the signature line (doc line 1396) is missing from this
 * extraction.  The body reads like a timer-event talloc destructor --
 * confirm against upstream event.c.
 */
1397 {
1398  fr_event_list_t *el = ev->el;
1399  fr_event_timer_t const **ev_p;
1400 
1401  /*
1401  * Event is either on the deferred-insertion list (if it was added
1401  * from inside a handler), or in the lst of armed timers.
1401  */
1401  if (fr_dlist_entry_in_list(&ev->entry)) {
1402  (void) fr_dlist_remove(&el->ev_to_add, ev);
1403  } else {
1404  int ret = fr_lst_extract(el->times, ev);
1405  char const *err_file;
1406  int err_line;
1407 
1408 #ifndef NDEBUG
1409  err_file = ev->file;
1410  err_line = ev->line;
1411 #else
1412  err_file = "not-available";
1413  err_line = 0;
1414 #endif
1415 
1416 
1417  /*
1418  * Events MUST be in the lst (or the insertion list).
1419  */
1420  if (!fr_cond_assert_msg(ret == 0,
1421  "Event %p, lst_id %i, allocd %s[%u], was not found in the event lst or "
1422  "insertion list when freed: %s", ev, ev->lst_id, err_file, err_line,
1423  fr_strerror())) return -1;
1424  }
1425 
1426  /* Clear the caller's handle so it can't be used after the free. */
1426  ev_p = ev->parent;
1427  fr_assert(*(ev->parent) == ev);
1428  *ev_p = NULL;
1429 
1430  return 0;
1431 }
1432 
1433 /** Insert a timer event into an event list
1434  *
1435  * @note The talloc parent of the memory returned in ev_p must not be changed.
1436  * If the lifetime of the event needs to be bound to another context
1437  * this function should be called with the existing event pointed to by
1438  * ev_p.
1439  *
1440  * @param[in] ctx to bind lifetime of the event to.
1441  * @param[in] el to insert event into.
1442  * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
1443  * in a temporal sense, not in a memory structure or dependency sense.
1444  * @param[in] when we should run the event.
1445  * @param[in] callback function to execute if the event fires.
1446  * @param[in] uctx user data to pass to the event.
1447  * @return
1448  * - 0 on success.
1449  * - -1 on failure.
1450  */
/*
 * NOTE(review): the signature line (doc line 1451) is missing from this
 * extraction.  Use of `file' and `line' below implies this is the
 * NDEBUG_LOCATION_ARGS variant of timer insertion -- confirm upstream.
 */
1452  TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
1453  fr_time_t when, fr_event_timer_cb_t callback, void const *uctx)
1454 {
1455  fr_event_timer_t *ev;
1456 
1457  if (unlikely(!el)) {
1458  fr_strerror_const("Invalid arguments: NULL event list");
1459  return -1;
1460  }
1461 
1462  if (unlikely(!callback)) {
1463  fr_strerror_const("Invalid arguments: NULL callback");
1464  return -1;
1465  }
1466 
1467  if (unlikely(!ev_p)) {
1468  fr_strerror_const("Invalid arguments: NULL ev_p");
1469  return -1;
1470  }
1471 
1472  if (unlikely(el->exit)) {
1473  fr_strerror_const("Event loop exiting");
1474  return -1;
1475  }
1476 
1477  /*
1478  * If there is an event, reuse it instead of freeing it
1479  * and allocating a new one. This is to reduce memory
1480  * churn for repeat events.
1481  */
1482  if (!*ev_p) {
1483  new_event:
1484  ev = talloc_zero(el, fr_event_timer_t);
1485  if (unlikely(!ev)) return -1;
1486 
1487  EVENT_DEBUG("%p - %s[%i] Added new timer %p", el, file, line, ev);
1488 
1489  /*
1490  * Bind the lifetime of the event to the specified
1491  * talloc ctx. If the talloc ctx is freed, the
1492  * event will also be freed.
1493  */
1494  if (ctx != el) talloc_link_ctx(ctx, ev);
1495 
1496  talloc_set_destructor(ev, _event_timer_free);
1497  ev->lst_id = 0;
1498 
1499  } else {
1500  ev = UNCONST(fr_event_timer_t *, *ev_p);
1501 
1502  EVENT_DEBUG("%p - %s[%i] Re-armed timer %p", el, file, line, ev);
1503 
1504  /*
1505  * We can't disarm the linking context due to
1506  * limitations in talloc, so if the linking
1507  * context changes, we need to free the old
1508  * event, and allocate a new one.
1509  *
1510  * Freeing the event also removes it from the lst.
1511  */
1512  if (unlikely(ev->linked_ctx != ctx)) {
1513  talloc_free(ev);
1514  goto new_event;
1515  }
1516 
1517  /*
1518  * Event may have fired, in which case the event
1519  * will no longer be in the event loop, so check
1520  * if it's in the lst before extracting it.
1521  */
1522  if (!fr_dlist_entry_in_list(&ev->entry)) {
1523  int ret;
1524  char const *err_file;
1525  int err_line;
1526 
1527  ret = fr_lst_extract(el->times, ev);
1528 
1529 #ifndef NDEBUG
1530  err_file = ev->file;
1531  err_line = ev->line;
1532 #else
1533  err_file = "not-available";
1534  err_line = 0;
1535 #endif
1536 
1537  /*
1538  * Events MUST be in the lst (or the insertion list).
1539  */
1540  if (!fr_cond_assert_msg(ret == 0,
1541  "Event %p, lst_id %i, allocd %s[%u], was not found in the event "
1542  "lst or insertion list when freed: %s", ev, ev->lst_id,
1543  err_file, err_line, fr_strerror())) return -1;
1544  }
1545  }
1546 
1547  ev->el = el;
1548  ev->when = when;
1549  ev->callback = callback;
1550  ev->uctx = uctx;
1551  ev->linked_ctx = ctx;
1552  ev->parent = ev_p;
1553 #ifndef NDEBUG
1554  ev->file = file;
1555  ev->line = line;
1556 #endif
1557 
1558  if (el->in_handler) {
1559  /*
1560  * Don't allow an event to be inserted
1561  * into the deferred insertion list
1562  * multiple times.
1563  */
1563  /*
1563  * NOTE(review): the statement for the in-handler branch (doc line
1563  * 1564) is missing from this extraction -- presumably the deferred
1563  * insertion into el->ev_to_add.  Confirm against upstream event.c.
1563  */
1565  } else if (unlikely(fr_lst_insert(el->times, ev) < 0)) {
1566  fr_strerror_const_push("Failed inserting event");
1567  talloc_set_destructor(ev, NULL);
1568  *ev_p = NULL;
1569  talloc_free(ev);
1570  return -1;
1571  }
1572 
1573  *ev_p = ev;
1574 
1575  return 0;
1576 }
1577 
1578 /** Insert a timer event into an event list (relative time)
1579  *
1580  * @note The talloc parent of the memory returned in ev_p must not be changed.
1581  * If the lifetime of the event needs to be bound to another context
1582  * this function should be called with the existing event pointed to by
1583  * ev_p.
1584  *
1585  * @param[in] ctx to bind lifetime of the event to.
1586  * @param[in] el to insert event into.
1587  * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
1588  * in a temporal sense, not in a memory structure or dependency sense.
1589  * @param[in] delta In how many nanoseconds to wait before should we execute the event.
1590  * @param[in] callback function to execute if the event fires.
1591  * @param[in] uctx user data to pass to the event.
1592  * @return
1593  * - 0 on success.
1594  * - -1 on failure.
1595  */
/*
 * NOTE(review): both the signature line (doc line 1596) and the line invoking
 * the absolute-time insertion function (doc line 1600) are missing from this
 * extraction; only the forwarded argument list survives.  This wrapper
 * converts the delta to an absolute time via el->time() before delegating.
 */
1597  TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
1598  fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx)
1599 {
1601  ctx, el, ev_p, fr_time_add(el->time(), delta), callback, uctx);
1602 }
1603 
1604 /** Delete a timer event from the event list
1605  *
1606  * @param[in] ev_p of the event being deleted.
1607  * @return
1608  * - 0 on success.
1609  * - -1 on failure.
1610  */
/*
 * NOTE(review): the signature line (doc line 1611) is missing from this
 * extraction.  Also note the body always returns 0, which does not match
 * the documented -1 failure case -- verify against upstream event.c.
 */
1612 {
1613  fr_event_timer_t *ev;
1614  int ret;
1615 
1616  /* NULL handle means nothing to delete; treated as success. */
1616  if (unlikely(!*ev_p)) return 0;
1617 
1618  ev = UNCONST(fr_event_timer_t *, *ev_p);
1619  ret = talloc_free(ev);
1620 
1621  /*
1622  * Don't leave a garbage pointer value
1623  * in the parent.
1624  */
1625  if (likely(ret == 0)) *ev_p = NULL;
1626  return 0;
1627 }
1628 
1629 /** Internal timestamp representing when the timer should fire
1630  *
1631  * @return When the timestamp should fire.
1632  */
/*
 * NOTE(review): the signature line (doc line 1633) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
1634 {
1635  return ev->when;
1636 }
1637 
1638 /** Remove PID wait event from kevent if the fr_event_pid_t is freed
1639  *
1640  * @param[in] ev to free.
1641  * @return 0
1642  */
/*
 * NOTE(review): the signature line (doc line 1643) is missing from this
 * extraction.  The body reads like the fr_event_pid_t talloc destructor --
 * confirm against upstream event.c.
 */
1644 {
1645  struct kevent evset;
1646 
1647  /* Invalidate the watcher's handle first, then skip the kqueue
1647  * delete if the event was never (or is no longer) registered. */
1647  if (ev->parent) *ev->parent = NULL;
1648  if (!ev->is_registered || (ev->pid < 0)) return 0; /* already deleted from kevent */
1649 
1650  EVENT_DEBUG("%p - Disabling event for PID %u - %p was freed", ev->el, (unsigned int)ev->pid, ev);
1651 
1652  EV_SET(&evset, ev->pid, EVFILT_PROC, EV_DELETE, NOTE_EXIT, 0, ev);
1653 
1654  /* Best effort: the kernel may have already dropped the oneshot entry. */
1654  (void) kevent(ev->el->kq, &evset, 1, NULL, 0, NULL);
1655 
1656  return 0;
1657 }
1658 
1659 /** Evaluate a EVFILT_PROC event
1660  *
1661  */
1662 static inline CC_HINT(always_inline)
1663 void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
1664 {
1665  pid_t pid;
1666  fr_event_pid_t *ev;
1667  fr_event_pid_cb_t callback;
1668  void *uctx;
1669 
1670  EVENT_DEBUG("%p - PID %u exited with status %i",
1671  el, (unsigned int)kev->ident, (unsigned int)kev->data);
1672 
1673  ev = talloc_get_type_abort((void *)kev->udata, fr_event_pid_t);
1674 
1675  fr_assert(ev->pid == (pid_t) kev->ident);
1676  fr_assert((kev->fflags & NOTE_EXIT) != 0);
1677 
1678  pid = ev->pid;
1679  callback = ev->callback;
1680  uctx = ev->uctx;
1681 
1682  ev->is_registered = false; /* so we won't hit kevent again when it's freed */
1683 
1684  /*
1685  * Delete the event before calling it.
1686  *
1687  * This also sets the parent pointer
1688  * to NULL, so the thing that started
1689  * monitoring the process knows the
1690  * handle is no longer valid.
1691  *
1692  * EVFILT_PROC NOTE_EXIT events are always
1693  * oneshot no matter what flags we pass,
1694  * so we're just reflecting the state of
1695  * the kqueue.
1696  */
1697  talloc_free(ev);
1698 
1699  if (callback) callback(el, pid, (int) kev->data, uctx);
1700 }
1701 
1702 /** Called on the next loop through the event loop when inserting an EVFILT_PROC event fails
1703  *
1704  * This is just a trampoline function which takes the user event and simulates
1705  * an EVFILT_PROC event from it.
1706  *
1707  * @param[in] el That received the event.
1708  * @param[in] uctx An fr_event_pid_t to process.
1709  */
/*
 * NOTE(review): the signature line (doc line 1710) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
1711 {
1712  fr_event_pid_t *ev = talloc_get_type_abort(uctx, fr_event_pid_t);
1713 
1714  EVENT_DEBUG("%p - PID %ld exited early, triggered through user event", el, (long)ev->pid);
1715 
1716  /*
1717  * Simulate a real struct kevent with the values we
1718  * recorded in fr_event_pid_wait.
1719  */
1720  event_pid_eval(el, &(struct kevent){ .ident = ev->pid, .data = ev->early_exit.status, .fflags = NOTE_EXIT, .udata = ev });
1721 }
1722 
1723 /** Insert a PID event into an event list
1724  *
1725  * @note The talloc parent of the memory returned in ev_p must not be changed.
1726  * If the lifetime of the event needs to be bound to another context
1727  * this function should be called with the existing event pointed to by
1728  * ev_p.
1729  *
1730  * @param[in] ctx to bind lifetime of the event to.
1731  * @param[in] el to insert event into.
1732  * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
1733  * in a temporal sense, not in a memory structure or dependency sense.
1734  * @param[in] pid child PID to wait for
1735  * @param[in] callback function to execute if the event fires.
1736  * @param[in] uctx user data to pass to the event.
1737  * @return
1738  * - 0 on success.
1739  * - -1 on failure.
1740  */
/*
 * NOTE(review): the signature line (doc line 1741) is missing from this
 * extraction.  Use of `file' and `line' below implies the
 * NDEBUG_LOCATION_ARGS variant of PID-wait insertion -- confirm upstream.
 */
1742  TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p,
1743  pid_t pid, fr_event_pid_cb_t callback, void *uctx)
1744 {
1745  fr_event_pid_t *ev;
1746  struct kevent evset;
1747 
1748  ev = talloc(ctx, fr_event_pid_t);
1749  if (unlikely(ev == NULL)) {
1750  fr_strerror_const("Out of memory");
1751  return -1;
1752  }
1753  *ev = (fr_event_pid_t) {
1754  .el = el,
1755  .pid = pid,
1756  .callback = callback,
1757  .uctx = uctx,
1758  .parent = ev_p,
1759 #ifndef NDEBUG
1760  .file = file,
1761  .line = line,
1762 #endif
1763  };
1764  talloc_set_destructor(ev, _event_pid_free);
1765 
1766  /*
1767  * macOS only, on FreeBSD NOTE_EXIT always provides
1768  * the status anyway.
1769  */
1770 #ifndef NOTE_EXITSTATUS
1771 #define NOTE_EXITSTATUS (0)
1772 #endif
1773 
1774  EVENT_DEBUG("%p - Adding exit waiter for PID %u", el, (unsigned int)pid);
1775 
1776  EV_SET(&evset, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT | NOTE_EXITSTATUS, 0, ev);
1777  ev->is_registered = true;
1778 
1779  /*
1780  * This deals with the race where the process exited
1781  * before we could add it to the kqueue.
1782  *
1783  * Unless our caller is broken, the process should
1784  * still be available for reaping, so we check
1785  * waitid to see if there is a pending process and
1786  * then call the callback as kqueue would have done.
1787  */
1788  if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1789  siginfo_t info;
1790  int ret;
1791 
1792  /*
1793  * Ensure we don't accidentally pick up the error
1794  * from kevent.
1795  */
1795  /*
1795  * NOTE(review): the statement implementing the comment above (doc
1795  * line 1796) is missing from this extraction -- presumably it clears
1795  * the thread-local error buffer.  Confirm against upstream event.c.
1795  */
1797 
1798  ev->is_registered = false;
1799 
1800  /*
1801  * If the child exited before kevent() was
1802  * called, we need to get its status via
1803  * waitid().
1804  *
1805  * We don't reap the process here to emulate
1806  * what kqueue does (notify but not reap).
1807  *
1808  * waitid returns >0 on success, 0 if the
1809  * process is still running, and -1 on failure.
1810  *
1811  * If we get a 0, then that's extremely strange
1812  * as adding the kevent failed for a reason
1813  * other than the process already having exited.
1814  *
1815  * On Linux waitid will always return 1 to
1816  * indicate the process exited.
1817  *
1818  * On macOS we seem to get a mix of 1 or 0,
1819  * even if the si_code is one of the values
1820  * we'd consider to indicate that the process
1821  * had completed.
1822  */
1823  ret = waitid(P_PID, pid, &info, WEXITED | WNOHANG | WNOWAIT);
1824  if (ret > 0) {
1825  static fr_table_num_sorted_t const si_codes[] = {
1826  { L("exited"), CLD_EXITED },
1827  { L("killed"), CLD_KILLED },
1828  { L("dumped"), CLD_DUMPED },
1829  { L("trapped"), CLD_TRAPPED },
1830  { L("stopped"), CLD_STOPPED },
1831  { L("continued"), CLD_CONTINUED }
1832  };
1833  static size_t si_codes_len = NUM_ELEMENTS(si_codes);
1834 
1835  switch (info.si_code) {
1836  case CLD_EXITED:
1837  case CLD_KILLED:
1838  case CLD_DUMPED:
1839  /* NOTE(review): "<UNKOWN>" in the format strings below is a
1839  * typo for "<UNKNOWN>", but it's a runtime string -- left as-is. */
1839  EVENT_DEBUG("%p - PID %ld early exit - code %s (%i), status %i",
1840  el, (long)pid, fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
1841  info.si_code, info.si_status);
1842 
1843  /*
1844  * Record the status for later
1845  */
1846  ev->early_exit.status = info.si_status;
1847 
1848  /*
1849  * The user event acts as a surrogate for
1850  * an EVFILT_PROC event, and will be evaluated
1851  * during the next loop through the event loop.
1852  *
1853  * It will be automatically deleted when the
1854  * fr_event_pid_t is freed.
1855  *
1856  * Previously we tried to evaluate the proc
1857  * callback here directly, but this lead to
1858  * multiple problems, the biggest being that
1859  * setting requests back to resumable failed
1860  * because they were not yet yielded,
1861  * leading to hangs.
1862  */
1863  early_exit:
1864  if (fr_event_user_insert(ev, el, &ev->early_exit.ev, true, _fr_event_pid_early_exit, ev) < 0) {
1865  fr_strerror_printf_push("Failed adding wait for PID %ld, and failed adding "
1866  "backup user event", (long) pid);
1867  error:
1868  talloc_free(ev);
1869  return -1;
1870  }
1871  break;
1872 
1873  default:
1874  fr_strerror_printf("Unexpected code %s (%u) whilst waiting on PID %ld",
1875  fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
1876  info.si_code, (long) pid);
1877 
1878  goto error;
1879  }
1880  /*
1881  * Failed adding waiter for process, but process has not completed...
1882  *
1883  * This is weird, but seems to happen on macOS occasionally.
1884  *
1885  * Add an event to run early exit...
1886  *
1887  * Man pages for waitid say if it returns 0 the info struct can be in
1888  * a nondeterministic state, so there's nothing more to do.
1889  */
1890  } else if (ret == 0) {
1891  goto early_exit;
1892  } else {
1893  /*
1894  * Print this error here, so that the caller gets
1895  * the error from kevent(), and not waitpid().
1896  */
1897  fr_strerror_printf("Failed adding waiter for PID %ld - kevent %s, waitid %s",
1898  (long) pid, fr_syserror(evset.flags), fr_syserror(errno));
1899 
1900  goto error;
1901  }
1902  }
1903 
1904  /*
1905  * Sometimes the caller doesn't care about getting the
1906  * PID. But we still want to clean it up.
1907  */
1908  if (ev_p) *ev_p = ev;
1909 
1910  return 0;
1911 }
1912 
1913 /** Saves some boilerplate...
1914  *
1915  */
1916 static inline CC_HINT(always_inline)
1917 void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
1918 {
1919  if (reap->callback) reap->callback(reap->el, pid, status, reap->uctx);
1920 }
1921 
1922 /** Does the actual reaping of PIDs
1923  *
1924  */
1925 static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
1926 {
1927  fr_event_pid_reap_t *reap = talloc_get_type_abort(uctx, fr_event_pid_reap_t);
1928 
1929  waitpid(pid, &status, WNOHANG); /* Don't block the process if there's a logic error somewhere */
1930 
1931  EVENT_DEBUG("%s - Reaper reaped PID %u, status %u - %p", __FUNCTION__, pid, status, reap);
1932 
1933  event_list_reap_run_callback(reap, pid, status);
1934 
1935  talloc_free(reap);
1936 }
1937 
/*
 * Talloc destructor for a reap entry: unlink it from the event list's
 * pid_to_reap tracking list if it was inserted.
 *
 * NOTE(review): the signature line (doc line 1938) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
1939 {
1940  /*
1941  * Clear out the entry in the pid_to_reap
1942  * list if the event was inserted.
1943  */
1944  if (fr_dlist_entry_in_list(&reap->entry)) {
1945  EVENT_DEBUG("%s - Removing entry from pid_to_reap %i - %p", __FUNCTION__,
1946  reap->pid_ev ? reap->pid_ev->pid : -1, reap);
1947  fr_dlist_remove(&reap->el->pid_to_reap, reap);
1948  }
1949 
1950  return 0;
1951 }
1952 
1953 /** Asynchronously wait for a PID to exit, then reap it
1954  *
1955  * This is intended to be used when we no longer care about a process
1956  * exiting, but we still want to clean up its state so we don't have
1957  * zombie processes sticking around.
1958  *
1959  * @param[in] el to use to reap the process.
1960  * @param[in] pid to reap.
1961  * @param[in] callback to call when the process is reaped.
1962  * May be NULL.
1963  * @param[in] uctx to pass to callback.
1964  * @return
1965  * - -1 if we couldn't find the process or it has already exited/been reaped.
1966  * - 0 on success (we setup a process handler).
1967  */
/*
 * NOTE(review): the signature line (doc line 1968) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
1969 {
1970  int ret;
1971  fr_event_pid_reap_t *reap;
1972 
1973  reap = talloc_zero(NULL, fr_event_pid_reap_t);
1974  if (unlikely(!reap)) {
1975  fr_strerror_const("Out of memory");
1976  return -1;
1977  }
1978  talloc_set_destructor(reap, _fr_event_reap_free);
1979 
1980  ret = _fr_event_pid_wait(NDEBUG_LOCATION_VALS reap, el, &reap->pid_ev, pid, _fr_event_pid_reap_cb, reap);
1981  if (ret < 0) {
1982  talloc_free(reap);
1983  return ret;
1984  }
1985 
1986  reap->el = el;
1987  reap->callback = callback;
1988  reap->uctx = uctx;
1989 
1990  EVENT_DEBUG("%s - Adding reaper for PID %u - %p", __FUNCTION__, pid, reap);
1991 
1991  /*
1991  * NOTE(review): the statement at doc line 1992 is missing from this
1991  * extraction -- presumably the insertion of `reap' into the event
1991  * list's pid_to_reap tracking list.  Confirm against upstream event.c.
1991  */
1993 
1994  return ret;
1995 }
1996 
1997 /** Send a signal to all the processes we have in our reap list, and reap them
1998  *
1999  * @param[in] el containing the processes to reap.
2000  * @param[in] timeout how long to wait before we signal the processes.
2001  * @param[in] signal to send to processes. Should be a fatal signal.
2002  * @return The number of processes reaped.
2003  */
/*
 * NOTE(review): the signature line (doc line 2004) is missing from this
 * extraction.  Confirm against upstream event.c.
 */
2005 {
2006  unsigned int processed = fr_dlist_num_elements(&el->pid_to_reap);
2007  fr_event_pid_reap_t *reap = NULL;
2008 
2009  /*
2010  * If we've got a timeout, our best option
2011  * is to use a kqueue instance to monitor
2012  * for process exit.
2013  */
2013  /*
2013  * NOTE(review): the condition opening this scope (doc line 2014) is
2013  * missing from this extraction -- presumably a check that `timeout'
2013  * is positive.  Confirm against upstream event.c.
2013  */
2015  int status;
2016  struct kevent evset;
2017  int waiting = 0;
2018  int kq = kqueue();
2019  fr_time_t now, start = el->time(), end = fr_time_add(start, timeout);
2020 
2021  if (unlikely(kq < 0)) goto force;
2022 
2022  /*
2022  * NOTE(review): the loop header iterating `i' over el->pid_to_reap
2022  * (doc line 2023) is missing from this extraction; the double closing
2022  * brace below (doc line 2056) matches a foreach-style macro.
2022  */
2024  if (!i->pid_ev) {
2025  EVENT_DEBUG("%p - %s - Reaper already called (logic error)... - %p",
2026  el, __FUNCTION__, i);
2027 
2028  event_list_reap_run_callback(i, -1, SIGKILL);
2029  talloc_free(i);
2030  continue;
2031  }
2032 
2033  /*
2034  * See if any processes have exited already
2035  */
2036  if (waitpid(i->pid_ev->pid, &status, WNOHANG) == i->pid_ev->pid) { /* reap */
2037  EVENT_DEBUG("%p - %s - Reaper PID %u already exited - %p",
2038  el, __FUNCTION__, i->pid_ev->pid, i);
2039  event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
2040  talloc_free(i);
2041  continue;
2042  }
2043 
2044  /*
2045  * Add the rest to a temporary event loop
2046  */
2047  EV_SET(&evset, i->pid_ev->pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, i);
2048  if (kevent(kq, &evset, 1, NULL, 0, NULL) < 0) {
2049  EVENT_DEBUG("%p - %s - Failed adding reaper PID %u to tmp event loop - %p",
2050  el, __FUNCTION__, i->pid_ev->pid, i);
2051  event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
2052  talloc_free(i);
2053  continue;
2054  }
2055  waiting++;
2056  }}
2057 
2058  /*
2059  * Keep draining process exits as they come in...
2060  */
2061  while ((waiting > 0) && fr_time_gt(end, (now = el->time()))) {
2062  struct kevent kev;
2063  int ret;
2064 
2065  ret = kevent(kq, NULL, 0, &kev, 1, &fr_time_delta_to_timespec(fr_time_sub(end, now)));
2066  switch (ret) {
2067  default:
2068  EVENT_DEBUG("%p - %s - Reaper tmp loop error %s, forcing process reaping",
2069  el, __FUNCTION__, fr_syserror(errno));
2070  close(kq);
2071  goto force;
2072 
2073  case 0:
2074  EVENT_DEBUG("%p - %s - Reaper timeout waiting for process exit, forcing process reaping",
2075  el, __FUNCTION__);
2076  close(kq);
2077  goto force;
2078 
2079  case 1:
2080  reap = talloc_get_type_abort(kev.udata, fr_event_pid_reap_t);
2081 
2082  EVENT_DEBUG("%p - %s - Reaper reaped PID %u, status %u - %p",
2083  el, __FUNCTION__, (unsigned int)kev.ident, (unsigned int)kev.data, reap);
2084  waitpid(reap->pid_ev->pid, &status, WNOHANG); /* reap */
2085 
2086  event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
2087  talloc_free(reap);
2088  break;
2089  }
2090  waiting--;
2091  }
2092 
2093  close(kq);
2094  }
2095 
2096 force:
2097  /*
2098  * Deal with any lingering reap requests
2099  */
2100  while ((reap = fr_dlist_head(&el->pid_to_reap))) {
2101  int status;
2102 
2103  EVENT_DEBUG("%s - Reaper forcefully reaping PID %u - %p", __FUNCTION__, reap->pid_ev->pid, reap);
2104 
2105  if (kill(reap->pid_ev->pid, signal) < 0) {
2106  /*
2107  * Make sure we don't hang if the
2108  * process has actually exited.
2109  *
2110  * We could check for ESRCH but it's
2111  * not clear if that'd be returned
2112  * for a PID in the unreaped state
2113  * or not...
2114  */
2115  waitpid(reap->pid_ev->pid, &status, WNOHANG);
2116  event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
2117  talloc_free(reap);
2118  continue;
2119  }
2120 
2121  /*
2122  * Wait until the child process exits
2123  */
2124  waitpid(reap->pid_ev->pid, &status, 0);
2125  event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
2125  /*
2125  * NOTE(review): the statement at doc line 2126 is missing from this
2125  * extraction -- presumably freeing `reap' so the loop can terminate.
2125  * Confirm against upstream event.c.
2125  */
2127  }
2128 
2129  return processed;
2130 }
2131 
2132 /** Memory will not be freed if we fail to remove the event from the kqueue
2133  *
2134  * It's easier to debug memory leaks with modern tooling than it is
2135  * to determine why we get random failures and event leaks inside of kqueue.
2136  *
2137  * @return
2138  * - 0 on success.
2139  * - -1 on failure.
2140  */
/*
 * NOTE(review): the signature line (doc line 2141) is missing from this
 * extraction.  The body reads like the fr_event_user_t talloc destructor --
 * confirm against upstream event.c.
 */
2142 {
2143  if (ev->is_registered) {
2144  struct kevent evset;
2145 
2145  /* The user event was registered with the ev pointer as its ident. */
2146  EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_DELETE, 0, 0, 0);
2147 
2148  if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
2149  fr_strerror_printf("Failed removing user event - kevent %s", fr_syserror(evset.flags));
2150  return -1;
2151  }
2152  ev->is_registered = false;
2153  }
2154 
2155  return 0;
2156 }
2157 
2158 static inline CC_HINT(always_inline)
2159 void event_user_eval(fr_event_list_t *el, struct kevent *kev)
2160 {
2161  fr_event_user_t *ev;
2162 
2163  /*
2164  * This is just a "wakeup" event, which
2165  * is always ignored.
2166  */
2167  if (kev->ident == 0) return;
2168 
2169  ev = talloc_get_type_abort((void *)kev->ident, fr_event_user_t);
2170  fr_assert((uintptr_t)ev == kev->ident);
2171 
2172  ev->callback(el, ev->uctx);
2173 }
2174 
/** Add a user callback to the event list.
 *
 * NOTE(review): the first line of the signature was lost in extraction;
 * given the NDEBUG .file/.line initialisers below it is presumably
 * "int _fr_event_user_insert(NDEBUG_LOCATION_ARGS" - confirm upstream.
 *
 * @param[in] ctx	to allocate the event in.
 * @param[in] el	Containing the timer events.
 * @param[out] ev_p	Where to write a pointer.
 * @param[in] trigger	Whether the user event is triggered initially.
 * @param[in] callback	for EVFILT_USER.
 * @param[in] uctx	for the callback.
 * @return
 *	- 0 on success.
 *	- -1 on error.
 */
				  TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p,
				  bool trigger, fr_event_user_cb_t callback, void *uctx)
{
	fr_event_user_t *ev;
	struct kevent evset;

	ev = talloc(ctx, fr_event_user_t);
	if (unlikely(ev == NULL)) {
		fr_strerror_const("Out of memory");
		return -1;
	}
	*ev = (fr_event_user_t) {
		.el = el,
		.callback = callback,
		.uctx = uctx,
#ifndef NDEBUG
		.file = file,		/* Caller location, for event debugging */
		.line = line,
#endif
	};

	/*
	 *	The handle's address is used as the kqueue ident.
	 *	(trigger * NOTE_TRIGGER) is a branchless way of
	 *	setting NOTE_TRIGGER only when trigger is true.
	 */
	EV_SET(&evset, (uintptr_t)ev,
	       EVFILT_USER, EV_ADD | EV_DISPATCH, (trigger * NOTE_TRIGGER), 0, ev);

	if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
		/*
		 *	NOTE(review): evset.flags is not errno; kevent()
		 *	reports errors via errno - confirm and fix upstream.
		 */
		fr_strerror_printf("Failed adding user event - kevent %s", fr_syserror(evset.flags));
		talloc_free(ev);
		return -1;
	}
	ev->is_registered = true;
	talloc_set_destructor(ev, _event_user_delete);

	if (ev_p) *ev_p = ev;

	return 0;
}
2224 
/** Trigger a user event
 *
 * NOTE(review): the signature line was lost in extraction; from the doc
 * comment this is fr_event_user_trigger(fr_event_list_t *el,
 * fr_event_user_t *ev) - confirm against the repository.
 *
 * @param[in] el	containing the user event.
 * @param[in] ev	Handle for the user event.
 * @return
 *	- 0 on success.
 *	- -1 on error.
 */
{
	struct kevent evset;

	/*
	 *	EV_ENABLE re-arms an event disabled by EV_DISPATCH;
	 *	NOTE_TRIGGER makes it fire.
	 */
	EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_ENABLE, NOTE_TRIGGER, 0, NULL);

	if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
		/*
		 *	NOTE(review): evset.flags is not errno - likely
		 *	should be fr_syserror(errno).  Confirm upstream.
		 */
		fr_strerror_printf("Failed triggering user event - kevent %s", fr_syserror(evset.flags));
		return -1;
	}

	return 0;
}
2246 
/** Add a pre-event callback to the event list.
 *
 * Events are serviced in insert order. i.e. insert A, B, we then
 * have A running before B.
 *
 * NOTE(review): the signature line was lost in extraction (presumably
 * fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback,
 * void *uctx)) - confirm against the repository.
 *
 * @param[in] el	Containing the timer events.
 * @param[in] callback	The pre-processing callback.
 * @param[in] uctx	for the callback.
 * @return
 *	- < 0 on error
 *	- 0 on success
 */
{
	fr_event_pre_t *pre;

	/* NOTE(review): the talloc() result is dereferenced unchecked. */
	pre = talloc(el, fr_event_pre_t);
	pre->callback = callback;
	pre->uctx = uctx;

	/*
	 *	NOTE(review): a line was lost in extraction here - presumably
	 *	fr_dlist_insert_tail(&el->pre_callbacks, pre); confirm upstream.
	 */

	return 0;
}
2271 
/** Delete a pre-event callback from the event list.
 *
 * NOTE(review): the signature line was lost in extraction - confirm
 * against the repository.
 *
 * @param[in] el	Containing the timer events.
 * @param[in] callback	The pre-processing callback.
 * @param[in] uctx	for the callback.
 * @return
 *	- < 0 on error (no matching callback found)
 *	- 0 on success
 */
{
	fr_event_pre_t *pre, *next;

	/*
	 *	Safe iteration: fetch the next element before we
	 *	potentially free the current one.
	 */
	for (pre = fr_dlist_head(&el->pre_callbacks);
	     pre != NULL;
	     pre = next) {
		next = fr_dlist_next(&el->pre_callbacks, pre);

		/* Match on BOTH callback and uctx, so identical callbacks
		 * with different contexts can coexist. */
		if ((pre->callback == callback) &&
		    (pre->uctx == uctx)) {
			/*
			 *	NOTE(review): a line was lost in extraction here -
			 *	presumably fr_dlist_remove(&el->pre_callbacks, pre);
			 */
			talloc_free(pre);
			return 0;
		}
	}

	return -1;
}
2300 
/** Add a post-event callback to the event list.
 *
 * Events are serviced in insert order. i.e. insert A, B, we then
 * have A running before B.
 *
 * NOTE(review): the signature line was lost in extraction - confirm
 * against the repository.
 *
 * @param[in] el	Containing the timer events.
 * @param[in] callback	The post-processing callback.
 * @param[in] uctx	for the callback.
 * @return
 *	- < 0 on error
 *	- 0 on success
 */
{
	fr_event_post_t *post;

	/* NOTE(review): the talloc() result is dereferenced unchecked. */
	post = talloc(el, fr_event_post_t);
	post->callback = callback;
	post->uctx = uctx;

	/*
	 *	NOTE(review): a line was lost in extraction here - presumably
	 *	fr_dlist_insert_tail(&el->post_callbacks, post); confirm upstream.
	 */

	return 0;
}
2325 
/** Delete a post-event callback from the event list.
 *
 * NOTE(review): the signature line was lost in extraction - confirm
 * against the repository.
 *
 * @param[in] el	Containing the timer events.
 * @param[in] callback	The post-processing callback.
 * @param[in] uctx	for the callback.
 * @return
 *	- < 0 on error (no matching callback found)
 *	- 0 on success
 */
{
	fr_event_post_t *post, *next;

	/*
	 *	Safe iteration: fetch the next element before we
	 *	potentially free the current one.
	 */
	for (post = fr_dlist_head(&el->post_callbacks);
	     post != NULL;
	     post = next) {
		next = fr_dlist_next(&el->post_callbacks, post);

		if ((post->callback == callback) &&
		    (post->uctx == uctx)) {
			/*
			 *	NOTE(review): a line was lost in extraction here -
			 *	presumably fr_dlist_remove(&el->post_callbacks, post);
			 */
			talloc_free(post);
			return 0;
		}
	}

	return -1;
}
2354 
/** Run a single scheduled timer event
 *
 * NOTE(review): the signature line was lost in extraction; from the body
 * this takes (fr_event_list_t *el, fr_time_t *when) - confirm upstream.
 *
 * @param[in] el	containing the timer events.
 * @param[in] when	Process events scheduled to run before or at this time.
 *			Updated to the next event's time (or 0) when nothing fires.
 * @return
 *	- 0 no timer events fired.
 *	- 1 a timer event fired.
 */
{
	fr_event_timer_cb_t	callback;
	void			*uctx;
	fr_event_timer_t	*ev;

	if (unlikely(!el)) return 0;

	if (fr_lst_num_elements(el->times) == 0) {
		*when = fr_time_wrap(0);
		return 0;
	}

	ev = fr_lst_peek(el->times);
	if (!ev) {
		*when = fr_time_wrap(0);
		return 0;
	}

	/*
	 *	See if it's time to do this one.
	 */
	if (fr_time_gt(ev->when, *when)) {
		*when = ev->when;
		return 0;
	}

	callback = ev->callback;
	/* memcpy rather than assignment - uctx may be const-qualified
	 * in the event struct; this sidesteps the qualifier. */
	memcpy(&uctx, &ev->uctx, sizeof(uctx));

	fr_assert(*ev->parent == ev);

	/*
	 *	Delete the event before calling it.
	 *
	 *	NOTE(review): the deletion line was lost in extraction -
	 *	presumably fr_event_timer_delete(ev->parent); confirm upstream.
	 */

	callback(el, *when, uctx);

	return 1;
}
2404 
/** Gather outstanding timer and file descriptor events
 *
 * NOTE(review): the signature line was lost in extraction; from the doc
 * comment and fr_event_loop() this is fr_event_corral(fr_event_list_t *el,
 * fr_time_t now, bool wait) - confirm against the repository.
 *
 * @param[in] el	to process events for.
 * @param[in] now	The current time.
 * @param[in] wait	if true, block on the kevent() call until a timer or file descriptor event occurs.
 * @return
 *	- <0 error, or the event loop is exiting
 *	- the number of outstanding I/O events, +1 if at least one timer will fire.
 */
{
	fr_time_delta_t		when, *wake;
	struct timespec		ts_when, *ts_wake;
	fr_event_pre_t		*pre;
	int			num_fd_events;
	bool			timer_event_ready = false;
	fr_event_timer_t	*ev;

	el->num_fd_events = 0;

	/* Latch a pending exit request into the real exit flag. */
	if (el->will_exit || el->exit) {
		el->exit = el->will_exit;

		fr_strerror_const("Event loop exiting");
		return -1;
	}

	/*
	 *	By default we wait for 0ns, which means returning
	 *	immediately from kevent().
	 */
	when = fr_time_delta_wrap(0);
	wake = &when;
	el->now = now;

	/*
	 *	See when we have to wake up.  Either now, if the timer
	 *	events are in the past.  Or, we wait for a future
	 *	timer event.
	 */
	ev = fr_lst_peek(el->times);
	if (ev) {
		if (fr_time_lteq(ev->when, el->now)) {
			timer_event_ready = true;

		} else if (wait) {
			when = fr_time_sub(ev->when, el->now);

		} /* else we're not waiting, leave "when == 0" */

	} else if (wait) {
		/*
		 *	We're asked to wait, but there's no timer
		 *	event.  We can then sleep forever.
		 */
		wake = NULL;
	}

	/*
	 *	Run the status callbacks.  It may tell us that the
	 *	application has more work to do, in which case we
	 *	re-set the timeout to be instant.
	 *
	 *	We only run these callbacks if the caller is otherwise
	 *	idle.
	 */
	if (wait) {
		for (pre = fr_dlist_head(&el->pre_callbacks);
		     pre != NULL;
		     pre = fr_dlist_next(&el->pre_callbacks, pre)) {
			if (pre->callback(now, wake ? *wake : fr_time_delta_wrap(0), pre->uctx) > 0) {
				wake = &when;
				when = fr_time_delta_wrap(0);
			}
		}
	}

	/*
	 *	Wake is the delta between el->now
	 *	(the event loops view of the current time)
	 *	and when the event should occur.
	 */
	if (wake) {
		ts_when = fr_time_delta_to_timespec(when);
		ts_wake = &ts_when;
	} else {
		/* NULL timeout == block in kevent() indefinitely */
		ts_wake = NULL;
	}

	/*
	 *	Populate el->events with the list of I/O events
	 *	that occurred since this function was last called
	 *	or wait for the next timer event.
	 */
	num_fd_events = kevent(el->kq, NULL, 0, el->events, FR_EV_BATCH_FDS, ts_wake);

	/*
	 *	Interrupt is different from timeout / FD events.
	 */
	if (unlikely(num_fd_events < 0)) {
		if (errno == EINTR) {
			/* Signal interrupted the wait - not an error. */
			return 0;
		} else {
			fr_strerror_printf("Failed calling kevent: %s", fr_syserror(errno));
			return -1;
		}
	}

	el->num_fd_events = num_fd_events;

	EVENT_DEBUG("%p - %s - kevent returned %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	If there are no FD events, we must have woken up from a timer
	 */
	if (!num_fd_events) {
		el->now = fr_time_add(el->now, when);
		if (wait) timer_event_ready = true;
	}
	/*
	 *	The caller doesn't really care what the value of the
	 *	return code is.  Just that it's greater than zero if
	 *	events needs servicing.
	 *
	 *	num_fd_events	  > 0 - if kevent() returns FD events
	 *	timer_event_ready > 0 - if there were timers ready BEFORE or AFTER calling kevent()
	 */
	return num_fd_events + timer_event_ready;
}
2534 
2535 static inline CC_HINT(always_inline)
2536 void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
2537 {
2538  fr_event_fd_cb_t fd_cb;
2539 
2540  while ((fd_cb = event_fd_func(ef, filter, fflags))) {
2541  fd_cb(el, ef->fd, flags, ef->uctx);
2542  }
2543 }
2544 
/** Service any outstanding timer or file descriptor events
 *
 * NOTE(review): the signature line was lost in extraction; from the doc
 * comment this is void fr_event_service(fr_event_list_t *el) - confirm
 * against the repository.
 *
 * @param[in] el containing events to service.
 */
{
	int			i;
	fr_event_post_t		*post;
	fr_time_t		when;
	fr_event_timer_t	*ev;

	if (unlikely(el->exit)) return;

	EVENT_DEBUG("%p - %s - Servicing %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	Run all of the file descriptor events.
	 *
	 *	in_handler is set so deletions performed by the
	 *	callbacks are deferred (see fd_to_free below).
	 */
	el->in_handler = true;
	for (i = 0; i < el->num_fd_events; i++) {
		/*
		 *	Process any user events
		 */
		switch (el->events[i].filter) {
		case EVFILT_USER:
			event_user_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process proc events
		 */
		case EVFILT_PROC:
			event_pid_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process various types of file descriptor events
		 */
		default:
		{
			fr_event_fd_t *ef = talloc_get_type_abort(el->events[i].udata, fr_event_fd_t);
			int fd_errno = 0;

			int fflags = el->events[i].fflags;	/* mutable */
			int filter = el->events[i].filter;
			int flags = el->events[i].flags;

			if (!ef->is_registered) continue;	/* Was deleted between corral and service */

			if (unlikely(flags & EV_ERROR)) {
				fd_errno = el->events[i].data;
			ev_error:
				/*
				 *	Call the error handler, but only if the socket hasn't been deleted at EOF
				 *	below.
				 */
				if (ef->is_registered && ef->error) ef->error(el, ef->fd, flags, fd_errno, ef->uctx);
				TALLOC_FREE(ef);
				continue;
			}

			/*
			 *	EOF can indicate we've actually reached
			 *	the end of a file, but for sockets it usually
			 *	indicates the other end of the connection
			 *	has gone away.
			 */
			if (flags & EV_EOF) {
				/*
				 *	This is fine, the callback will get notified
				 *	via the flags field.
				 */
				if (ef->type == FR_EVENT_FD_FILE) goto service;
#if defined(__linux__) && defined(SO_GET_FILTER)
				/*
				 *	There seems to be an issue with the
				 *	ioctl(...SIOCNQ...) call libkqueue
				 *	uses to determine the number of bytes
				 *	readable.  When ioctl returns, the number
				 *	of bytes available is set to zero, which
				 *	libkqueue interprets as EOF.
				 *
				 *	As a workaround, if we're not reading
				 *	a file, and are operating on a raw socket
				 *	with a packet filter attached, we ignore
				 *	the EOF flag and continue.
				 */
				if ((ef->sock_type == SOCK_RAW) && (ef->type == FR_EVENT_FD_PCAP)) goto service;
#endif

				/*
				 *	If we see an EV_EOF flag that means the
				 *	read side of the socket has been closed
				 *	but there may still be pending data.
				 *
				 *	Dispatch the read event and then error.
				 */
				if ((el->events[i].filter == EVFILT_READ) && (el->events[i].data > 0)) {
					event_callback(el, ef, &filter, flags, &fflags);
				}

				fd_errno = el->events[i].fflags;

				goto ev_error;
			}

		service:
#ifndef NDEBUG
			EVENT_DEBUG("Running event for fd %d, from %s[%d]", ef->fd, ef->file, ef->line);
#endif

			/*
			 *	Service the event_fd events
			 */
			event_callback(el, ef, &filter, flags, &fflags);
		}
		}
	}

	/*
	 *	Process any deferred frees performed
	 *	by the I/O handlers.
	 *
	 *	The events are removed from the FD rbtree
	 *	and kevent immediately, but frees are
	 *	deferred to allow stale events to be
	 *	skipped sans SEGV.
	 */
	el->in_handler = false;	/* Allow events to be deleted */
	{
		fr_event_fd_t *ef;

		while ((ef = fr_dlist_head(&el->fd_to_free))) talloc_free(ef);
	}

	/*
	 *	We must call el->time() again here, else the event
	 *	list's time gets updated too infrequently, and we
	 *	can end up with a situation where timers are
	 *	serviced much later than they should be, which can
	 *	cause strange interaction effects, spurious calls
	 *	to kevent, and busy loops.
	 */
	el->now = el->time();

	/*
	 *	Run all of the timer events.  Note that these can add
	 *	new timers!
	 */
	if (fr_lst_num_elements(el->times) > 0) {
		el->in_handler = true;

		do {
			when = el->now;
		} while (fr_event_timer_run(el, &when) == 1);

		el->in_handler = false;
	}

	/*
	 *	New timers can be added while running the timer
	 *	callback. Instead of being added to the main timer
	 *	lst, they are instead added to the "to do" list.
	 *	Once we're finished running the callbacks, we walk
	 *	through the "to do" list, and add the callbacks to the
	 *	timer lst.
	 *
	 *	Doing it this way prevents the server from running
	 *	into an infinite loop.  The timer callback MAY add a
	 *	new timer which is in the past.  The loop above would
	 *	then immediately run the new callback, which could
	 *	also add an event in the past...
	 */
	while ((ev = fr_dlist_head(&el->ev_to_add)) != NULL) {
		(void)fr_dlist_remove(&el->ev_to_add, ev);
		if (unlikely(fr_lst_insert(el->times, ev) < 0)) {
			talloc_free(ev);
			fr_assert_msg(0, "failed inserting lst event: %s", fr_strerror());	/* Die in debug builds */
		}
	}
	el->now = el->time();

	/*
	 *	Run all of the post-processing events.
	 */
	for (post = fr_dlist_head(&el->post_callbacks);
	     post != NULL;
	     post = fr_dlist_next(&el->post_callbacks, post)) {
		post->callback(el, el->now, post->uctx);
	}
}
2736 
/** Signal an event loop exit with the specified code
 *
 * The event loop will complete its current iteration, and then exit with the specified code.
 *
 * NOTE(review): the signature line was lost in extraction; presumably
 * void fr_event_loop_exit(fr_event_list_t *el, int code) - confirm upstream.
 *
 * @param[in] el	to signal to exit.
 * @param[in] code	for #fr_event_loop to return.
 */
{
	if (unlikely(!el)) return;

	/* Latched into el->exit by the next fr_event_corral() call. */
	el->will_exit = code;
}
2750 
/** Check to see whether the event loop is in the process of exiting
 *
 * NOTE(review): the signature line was lost in extraction; presumably
 * bool fr_event_loop_exiting(fr_event_list_t *el) - confirm upstream.
 *
 * @param[in] el	to check.
 */
{
	return ((el->will_exit != 0) || (el->exit != 0));
}
2759 
/** Run an event loop
 *
 * @note Will not return until #fr_event_loop_exit is called.
 *
 * @param[in] el to start processing.
 */
CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el)
{
	el->will_exit = el->exit = 0;

	el->dispatch = true;
	while (!el->exit) {
		/*
		 *	NOTE(review): the "< 0" comparison sits OUTSIDE the
		 *	unlikely() hint.  This is only correct if unlikely()
		 *	passes the value through unchanged (i.e. no !!
		 *	normalisation) - confirm against build.h.
		 *
		 *	NOTE(review): a line was lost in extraction after the
		 *	corral call - presumably fr_event_service(el);
		 */
		if (unlikely(fr_event_corral(el, el->time(), true)) < 0) break;
	}

	/*
	 *	Give processes five seconds to exit.
	 *	This means any triggers that we may
	 *	have issued when the server exited
	 *	have a chance to complete.
	 *
	 *	NOTE(review): the reap line was lost in extraction -
	 *	presumably a fr_event_list_reap_signal() call; confirm upstream.
	 */
	el->dispatch = false;

	return el->exit;
}
2787 
/** Cleanup an event list
 *
 * Frees/destroys any resources associated with an event list
 *
 * NOTE(review): the signature line was lost in extraction; from the
 * talloc_set_destructor(el, _event_list_free) call elsewhere this is the
 * fr_event_list_t talloc destructor - confirm upstream.
 *
 * @param[in] el to free resources for.
 */
{
	fr_event_timer_t const *ev;

	/* Delete all outstanding timers before tearing the list down. */
	while ((ev = fr_lst_peek(el->times)) != NULL) fr_event_timer_delete(&ev);

	/*
	 *	NOTE(review): a line was lost in extraction here - possibly
	 *	a reap/signal call for outstanding PID events; confirm upstream.
	 */

	talloc_free_children(el);

	/* kq is initialised to -1 so this is safe pre-kqueue(). */
	if (el->kq >= 0) close(el->kq);

	return 0;
}
2808 
/** Free any memory we allocated for indexes
 *
 * NOTE(review): the signature line was lost in extraction; this is the
 * fr_atexit_global_once_ret() teardown counterpart to
 * _event_build_indexes - confirm upstream.
 */
{
	unsigned int i;

	/* Abort on the first failed free so the error is reported. */
	for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) if (talloc_free(filter_maps[i].ev_to_func) < 0) return -1;
	return 0;
}
2819 
2821 {
2822  unsigned int i;
2823 
2824  for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) event_fd_func_index_build(&filter_maps[i]);
2825  return 0;
2826 }
2827 
2828 #ifdef EVFILT_LIBKQUEUE
2829 /** kqueue logging wrapper function
2830  *
2831  */
2832 static CC_HINT(format (printf, 1, 2)) CC_HINT(nonnull)
2833 void _event_kqueue_log(char const *fmt, ...)
2834 {
2835  va_list ap;
2836 
2837  va_start(ap, fmt);
2838  fr_vlog(&default_log, L_DBG, __FILE__, __LINE__, fmt, ap);
2839  va_end(ap);
2840 }
2841 
/** If we're building with libkqueue, and at debug level 4 or higher, enable libkqueue debugging output
 *
 * This requires a debug build of libkqueue
 */
static int _event_kqueue_logging(UNUSED void *uctx)
{
	struct kevent kev, receipt;

	/* A dedicated kqueue is used purely to carry the EVFILT_LIBKQUEUE
	 * configuration commands. */
	log_conf_kq = kqueue();
	if (unlikely(log_conf_kq < 0)) {
		fr_strerror_const("Failed initialising logging configuration kqueue");
		return -1;
	}

	/* Register our log wrapper as libkqueue's debug output function. */
	EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, (intptr_t)_event_kqueue_log, NULL);
	if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
		/*
		 *	Returns 1 (not -1) - presumably treated as a non-fatal
		 *	"feature unavailable" by the once-init machinery;
		 *	NOTE(review): confirm the caller distinguishes >0 from <0.
		 */
		close(log_conf_kq);
		log_conf_kq = -1;
		return 1;
	}

	if (fr_debug_lvl >= L_DBG_LVL_3) {
		/* Turn libkqueue's internal debugging on. */
		EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG, 1, NULL);
		if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
			fr_strerror_const("Failed enabling libkqueue debug logging");
			close(log_conf_kq);
			log_conf_kq = -1;
			return -1;
		}
	}

	return 0;
}
2875 
/* Deregister the libkqueue debug log function and close the
 * configuration kqueue.  Runs at process exit. */
static int _event_kqueue_logging_stop(UNUSED void *uctx)
{
	struct kevent kev, receipt;

	/* Passing 0 as the function pointer clears the debug hook;
	 * failure is ignored as we're tearing down anyway. */
	EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, 0, NULL);
	(void)kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){});

	close(log_conf_kq);
	log_conf_kq = -1;

	return 0;
}
2888 #endif
2889 
/** Initialise a new event list
 *
 * @param[in] ctx		to allocate memory in.
 * @param[in] status		callback, called on each iteration of the event list.
 * @param[in] status_uctx	context for the status callback
 * @return
 *	- A pointer to a new event list on success (free with talloc_free).
 *	- NULL on error.
 */
fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
{
	/*
	 *	NOTE(review): the declaration of "el" (fr_event_list_t *el;)
	 *	was lost in extraction - restore from the repository.
	 */
	struct kevent kev;
	int ret;

	/*
	 *	Build the map indexes the first time this
	 *	function is called.
	 */
	fr_atexit_global_once_ret(&ret, _event_build_indexes, _event_free_indexes, NULL);
#ifdef EVFILT_LIBKQUEUE
	fr_atexit_global_once_ret(&ret, _event_kqueue_logging, _event_kqueue_logging_stop, NULL);
#endif

	el = talloc_zero(ctx, fr_event_list_t);
	if (!fr_cond_assert(el)) {
		fr_strerror_const("Out of memory");
		return NULL;
	}
	el->time = fr_time;
	el->kq = -1;	/* So destructor can be used before kqueue() provides us with fd */
	talloc_set_destructor(el, _event_list_free);

	/*
	 *	NOTE(review): the line allocating el->times (the timer lst)
	 *	was lost in extraction - restore from the repository.
	 */
	if (!el->times) {
		fr_strerror_const("Failed allocating event lst");
	error:
		talloc_free(el);
		return NULL;
	}

	/*
	 *	NOTE(review): the line allocating el->fds (the FD rb tree)
	 *	was lost in extraction - restore from the repository.
	 */
	if (!el->fds) {
		fr_strerror_const("Failed allocating FD tree");
		goto error;
	}

	el->kq = kqueue();
	if (el->kq < 0) {
		fr_strerror_printf("Failed allocating kqueue: %s", fr_syserror(errno));
		goto error;
	}

	/*
	 *	NOTE(review): several initialisation lines were lost in
	 *	extraction here - likely the fr_dlist_talloc_init() calls for
	 *	pre_callbacks, post_callbacks, pid_to_reap, ev_to_add and
	 *	fd_to_free.  Restore from the repository.
	 */
	if (status) (void) fr_event_pre_insert(el, status, status_uctx);

	/*
	 *	Set our "exit" callback as ident 0.
	 */
	EV_SET(&kev, 0, EVFILT_USER, EV_ADD | EV_CLEAR, NOTE_FFNOP, 0, NULL);
	if (kevent(el->kq, &kev, 1, NULL, 0, NULL) < 0) {
		fr_strerror_printf("Failed adding exit callback to kqueue: %s", fr_syserror(errno));
		goto error;
	}

#ifdef WITH_EVENT_DEBUG
	/* Periodic self-report of event list statistics (debug builds). */
	fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, NULL);
#endif

	return el;
}
2965 
/** Override event list time source
 *
 * NOTE(review): the signature line was lost in extraction; presumably
 * void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func)
 * - confirm upstream.
 *
 * @param[in] el	to set new time function for.
 * @param[in] func	to set.
 */
{
	el->time = func;
}
2975 
/** Return whether the event loop has any active events
 *
 * NOTE(review): both the signature and the body expression were lost in
 * extraction (the body presumably checks fr_lst_num_elements(el->times)
 * and the FD count) - restore from the repository.
 */
{
}
2983 
2984 #ifdef WITH_EVENT_DEBUG
/* Bucket boundaries for the timer-distance histogram in fr_event_report().
 * Each entry is a fr_time_delta_t initialised with a nanosecond count,
 * spanning 1ns to 100Ms in powers of ten (see decade_names below). */
static const fr_time_delta_t decades[18] = {
	{ 1 }, { 10 }, { 100 },
	{ 1000 }, { 10000 }, { 100000 },
	{ 1000000 }, { 10000000 }, { 100000000 },
	{ 1000000000 }, { 10000000000 }, { 100000000000 },
	{ 1000000000000 }, { 10000000000000 }, { 100000000000000 },
	{ 1000000000000000 }, { 10000000000000000 }, { 100000000000000000 },
};
2993 
/* Human-readable labels for the decades[] buckets above.
 * Must stay in one-to-one correspondence with decades[]. */
static const char *decade_names[18] = {
	"1ns", "10ns", "100ns",
	"1us", "10us", "100us",
	"1ms", "10ms", "100ms",
	"1s", "10s", "100s",
	"1Ks", "10Ks", "100Ks",
	"1Ms", "10Ms", "100Ms",		/* 1 year is 300Ms */
};
3002 
/* Per-allocation-site counter used by fr_event_report() to aggregate
 * how many timers were allocated at each file/line location. */
typedef struct {
	fr_rb_node_t	node;		//!< Entry in the per-bucket rb tree.
	char const	*file;		//!< File the timer event was allocated in.
	int		line;		//!< Line the timer event was allocated at.
	uint32_t	count;		//!< Number of timers allocated at this location.
} fr_event_counter_t;
3009 
/* Order fr_event_counter_t nodes by allocation site: file pointer
 * first, then line number.  Used as the rb tree comparator. */
static int8_t event_timer_location_cmp(void const *one, void const *two)
{
	fr_event_counter_t const *a = one;
	fr_event_counter_t const *b = two;

	/* Returns early when the file fields differ. */
	CMP_RETURN(a, b, file);

	return CMP(a->line, b->line);
}
3019 
3020 
/** Print out information about the number of events in the event loop
 *
 * Buckets outstanding timers by how far in the future they fire
 * (decades[]), counts them per allocation site, and logs the result.
 * Re-arms itself every EVENT_REPORT_FREQ seconds.
 */
void fr_event_report(fr_event_list_t *el, fr_time_t now, void *uctx)
{
	/*
	 *	NOTE(review): the iterator declaration (fr_lst_iter_t iter;)
	 *	was lost in extraction - restore from the repository.
	 */
	fr_event_timer_t const *ev;
	size_t i;

	size_t array[NUM_ELEMENTS(decades)] = { 0 };
	fr_rb_tree_t *locations[NUM_ELEMENTS(decades)];
	TALLOC_CTX *tmp_ctx;
	/* Serialises the report output when multiple event lists report
	 * concurrently from different threads. */
	static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;

	tmp_ctx = talloc_init_const("temporary stats");
	if (!tmp_ctx) {
	oom:
		EVENT_DEBUG("Can't do report, out of memory");
		talloc_free(tmp_ctx);
		return;
	}

	/* One rb tree of per-location counters per histogram bucket. */
	for (i = 0; i < NUM_ELEMENTS(decades); i++) {
		locations[i] = fr_rb_inline_alloc(tmp_ctx, fr_event_counter_t, node, event_timer_location_cmp, NULL);
		if (!locations[i]) goto oom;
	}

	/*
	 *	Show which events are due, when they're due,
	 *	and where they were allocated
	 */
	for (ev = fr_lst_iter_init(el->times, &iter);
	     ev != NULL;
	     ev = fr_lst_iter_next(el->times, &iter)) {
		fr_time_delta_t diff = fr_time_sub(ev->when, now);

		for (i = 0; i < NUM_ELEMENTS(decades); i++) {
			/* Last bucket catches everything beyond the largest decade. */
			if ((fr_time_delta_cmp(diff, decades[i]) <= 0) || (i == NUM_ELEMENTS(decades) - 1)) {
				fr_event_counter_t find = { .file = ev->file, .line = ev->line };
				fr_event_counter_t *counter;

				counter = fr_rb_find(locations[i], &find);
				if (!counter) {
					counter = talloc(locations[i], fr_event_counter_t);
					if (!counter) goto oom;
					counter->file = ev->file;
					counter->line = ev->line;
					counter->count = 1;
					fr_rb_insert(locations[i], counter);
				} else {
					counter->count++;
				}

				array[i]++;
				break;
			}
		}
	}

	pthread_mutex_lock(&print_lock);
	EVENT_DEBUG("%p - Event list stats", el);
	EVENT_DEBUG("    fd events            : %"PRIu64, fr_event_list_num_fds(el));
	EVENT_DEBUG("    events last iter     : %u", el->num_fd_events);
	EVENT_DEBUG("    num timer events     : %"PRIu64, fr_event_list_num_timers(el));

	for (i = 0; i < NUM_ELEMENTS(decades); i++) {
		fr_rb_iter_inorder_t event_iter;
		void *node;

		if (!array[i]) continue;

		if (i == 0) {
			EVENT_DEBUG("    events <= %5s      : %zu", decade_names[i], array[i]);
		} else if (i == (NUM_ELEMENTS(decades) - 1)) {
			EVENT_DEBUG("    events > %5s       : %zu", decade_names[i - 1], array[i]);
		} else {
			EVENT_DEBUG("    events %5s - %5s : %zu", decade_names[i - 1], decade_names[i], array[i]);
		}

		for (node = fr_rb_iter_init_inorder(&event_iter, locations[i]);
		     node;
		     node = fr_rb_iter_next_inorder(&event_iter)) {
			fr_event_counter_t *counter = talloc_get_type_abort(node, fr_event_counter_t);

			EVENT_DEBUG("        : %u allocd at %s[%u]",
				    counter->count, counter->file, counter->line);
		}
	}
	pthread_mutex_unlock(&print_lock);

	/* Re-arm for the next periodic report. */
	fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, uctx);
	talloc_free(tmp_ctx);
}
3114 
3115 #ifndef NDEBUG
/* Dump every outstanding timer event (address, fire time, whether it's
 * already overdue, and its callback) to the debug log. */
void fr_event_timer_dump(fr_event_list_t *el)
{
	/*
	 *	NOTE(review): the iterator declaration (fr_lst_iter_t iter;)
	 *	was lost in extraction - restore from the repository.
	 */
	fr_event_timer_t *ev;
	fr_time_t now;

	now = el->time();

	EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now));

	for (ev = fr_lst_iter_init(el->times, &iter);
	     ev;
	     ev = fr_lst_iter_next(el->times, &iter)) {
		(void)talloc_get_type_abort(ev, fr_event_timer_t);
		/* '<' means the event is overdue relative to now. */
		EVENT_DEBUG("%s[%u]: %p time=%" PRId64 " (%c), callback=%p",
			    ev->file, ev->line, ev, fr_time_unwrap(ev->when),
			    fr_time_gt(now, ev->when) ? '<' : '>', ev->callback);
	}
}
3135 #endif
3136 #endif
3137 
3138 #ifdef TESTING
3139 
3140 /*
3141  * cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event
3142  *
3143  * ./event
3144  *
3145  * And hit CTRL-S to stop the output, CTRL-Q to continue.
3146  * It normally alternates printing the time and sleeping,
3147  * but when you hit CTRL-S/CTRL-Q, you should see a number
3148  * of events run right after each other.
3149  *
3150  * OR
3151  *
3152  * valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./event
3153  */
3154 
3155 static void print_time(void *ctx)
3156 {
3157  fr_time_t when;
3158  int64_t usec;
3159 
3160  when = *(fr_time_t *) ctx;
3161  usec = fr_time_to_usec(when);
3162 
3163  printf("%d.%06d\n", usec / USEC, usec % USEC);
3164  fflush(stdout);
3165 }
3166 
3167 static fr_randctx rand_pool;
3168 
3169 static uint32_t event_rand(void)
3170 {
3171  uint32_t num;
3172 
3173  num = rand_pool.randrsl[rand_pool.randcnt++];
3174  if (rand_pool.randcnt == 256) {
3175  fr_isaac(&rand_pool);
3176  rand_pool.randcnt = 0;
3177  }
3178 
3179  return num;
3180 }
3181 
3182 
3183 #define MAX 100
/* TESTING harness entry point: schedules MAX randomly-spaced timers,
 * then services them, sleeping between due times. */
int main(int argc, char **argv)
{
	int i, rcode;
	fr_time_t array[MAX];
	fr_time_t now, when;
	/*
	 *	NOTE(review): the declaration of "el" (fr_event_list_t *el;)
	 *	was lost in extraction - restore from the repository.
	 */

	/*
	 *	NOTE(review): fr_event_list_alloc() is called with two
	 *	arguments here but declared above with three (ctx, status,
	 *	status_uctx) - this TESTING block looks stale; confirm.
	 */
	el = fr_event_list_alloc(NULL, NULL);
	if (!el) fr_exit_now(1);

	/* Seed the ISAAC pool from the wall clock. */
	memset(&rand_pool, 0, sizeof(rand_pool));
	rand_pool.randrsl[1] = time(NULL);

	fr_rand_init(&rand_pool, 1);
	rand_pool.randcnt = 0;

	/* Build a monotonically increasing schedule of random offsets. */
	array[0] = el->time();
	for (i = 1; i < MAX; i++) {
		array[i] = array[i - 1];
		array[i] += event_rand() & 0xffff;

		/*
		 *	NOTE(review): array[i] is passed by value as the uctx
		 *	where print_time() expects a pointer - this TESTING
		 *	block looks stale relative to the current API; confirm.
		 */
		fr_event_timer_at(NULL, el, array[i], print_time, array[i]);
	}

	while (fr_event_list_num_timers(el)) {
		now = el->time();
		when = now;
		if (!fr_event_timer_run(el, &when)) {
			int delay = (when - now) / 1000;	/* nanoseconds to microseconds */

			printf("\tsleep %d microseconds\n", delay);
			fflush(stdout);
			usleep(delay);
		}
	}

	talloc_free(el);

	return 0;
}
3224 #endif
int const char * file
Definition: acutest.h:702
va_end(args)
static int const char * fmt
Definition: acutest.h:573
int const char int line
Definition: acutest.h:702
va_start(args, fmt)
#define UNCONST(_type, _ptr)
Remove const qualification from a pointer.
Definition: build.h:165
#define RCSID(id)
Definition: build.h:481
#define DIAG_UNKNOWN_PRAGMAS
Definition: build.h:454
#define L(_str)
Helper for initialising arrays of string literals.
Definition: build.h:207
#define typeof_field(_type, _field)
Typeof field.
Definition: build.h:172
#define DIAG_ON(_x)
Definition: build.h:456
#define CMP_RETURN(_a, _b, _field)
Return if the comparison is not 0 (is unequal)
Definition: build.h:119
#define CMP(_a, _b)
Same as CMP_PREFER_SMALLER use when you don't really care about ordering, you just want an ordering.
Definition: build.h:110
#define unlikely(_x)
Definition: build.h:379
#define NDEBUG_LOCATION_VALS
Definition: build.h:262
#define NDEBUG_LOCATION_ARGS
Pass caller information to the function.
Definition: build.h:261
#define UNUSED
Definition: build.h:313
#define NUM_ELEMENTS(_t)
Definition: build.h:335
#define DIAG_OFF(_x)
Definition: build.h:455
static int kq
Definition: control_test.c:46
next
Definition: dcursor.h:178
fr_dcursor_eval_t void const * uctx
Definition: dcursor.h:546
fr_dcursor_iter_t iter
Definition: dcursor.h:147
#define fr_cond_assert(_x)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition: debug.h:139
#define fr_assert_msg(_x, _msg,...)
Calls panic_action ifndef NDEBUG, else logs error and causes the server to exit immediately with code...
Definition: debug.h:210
#define fr_assert_fail(_msg,...)
Calls panic_action ifndef NDEBUG, else logs error.
Definition: debug.h:216
#define fr_cond_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition: debug.h:156
#define fr_exit_now(_x)
Exit without calling atexit() handlers, producing a log message in debug builds.
Definition: debug.h:234
int main(int argc, char **argv)
Definition: dhcpclient.c:521
static fr_time_delta_t timeout
Definition: dhcpclient.c:54
static void * fr_dlist_next(fr_dlist_head_t const *list_head, void const *ptr)
Get the next item in a list.
Definition: dlist.h:555
static bool fr_dlist_entry_in_list(fr_dlist_t const *entry)
Check if a list entry is part of a list.
Definition: dlist.h:163
static unsigned int fr_dlist_num_elements(fr_dlist_head_t const *head)
Return the number of elements in the dlist.
Definition: dlist.h:939
static void * fr_dlist_head(fr_dlist_head_t const *list_head)
Return the HEAD item of a list or NULL if the list is empty.
Definition: dlist.h:486
static int fr_dlist_insert_tail(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the tail of a list.
Definition: dlist.h:378
static void * fr_dlist_remove(fr_dlist_head_t *list_head, void *ptr)
Remove an item from the list.
Definition: dlist.h:638
#define fr_dlist_talloc_init(_head, _type, _field)
Initialise the head structure of a doubly linked list.
Definition: dlist.h:275
static int fr_dlist_insert_head(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the head of a list.
Definition: dlist.h:338
#define fr_dlist_foreach_safe(_list_head, _type, _iter)
Iterate over the contents of a list allowing for removals.
Definition: dlist.h:108
Head of a doubly linked list.
Definition: dlist.h:51
Entry in a doubly linked list.
Definition: dlist.h:41
#define fr_event_user_insert(_ctx, _ev_p, _el, _trigger, _callback, _uctx)
Definition: event.h:280
fr_event_io_func_t io
Read/write functions.
Definition: event.h:199
void(* fr_event_timer_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx)
Called when a timer event fires.
Definition: event.h:118
struct fr_event_user_s fr_event_user_t
An opaquer user event handle.
Definition: event.h:57
void(* fr_event_fd_cb_t)(fr_event_list_t *el, int fd, int flags, void *uctx)
Called when an IO event occurs on a file descriptor.
Definition: event.h:137
@ FR_EVENT_OP_SUSPEND
Temporarily remove the relevant filter from kevent.
Definition: event.h:69
@ FR_EVENT_OP_RESUME
Reinsert the filter into kevent.
Definition: event.h:70
fr_event_filter_t
The type of filter to install for an FD.
Definition: event.h:61
@ FR_EVENT_FILTER_VNODE
Filter for vnode subfilters.
Definition: event.h:63
@ FR_EVENT_FILTER_IO
Combined filter for read/write functions/.
Definition: event.h:62
size_t offset
Offset of function in func struct.
Definition: event.h:76
struct fr_event_pid fr_event_pid_t
An opaque PID status handle.
Definition: event.h:53
fr_event_fd_cb_t read
Callback for when data is available.
Definition: event.h:174
void(* fr_event_pid_cb_t)(fr_event_list_t *el, pid_t pid, int status, void *uctx)
Called when a child process has exited.
Definition: event.h:156
void(* fr_event_error_cb_t)(fr_event_list_t *el, int fd, int flags, int fd_errno, void *uctx)
Called when an IO error event occurs on a file descriptor.
Definition: event.h:147
fr_time_t(* fr_event_time_source_t)(void)
Alternative time source, useful for testing.
Definition: event.h:169
int(* fr_event_status_cb_t)(fr_time_t now, fr_time_delta_t wake, void *uctx)
Called after each event loop cycle.
Definition: event.h:128
fr_event_op_t op
Operation to perform on function/filter.
Definition: event.h:77
#define fr_event_timer_at(...)
Definition: event.h:250
#define fr_event_timer_in(...)
Definition: event.h:255
void(* fr_event_user_cb_t)(fr_event_list_t *el, void *uctx)
Called when a user kevent occurs.
Definition: event.h:163
Callbacks for the FR_EVENT_FILTER_IO filter.
Definition: event.h:173
Structure describing a modification to a filter's state.
Definition: event.h:75
Callbacks for the FR_EVENT_FILTER_VNODE filter.
Definition: event.h:180
Union of all filter functions.
Definition: event.h:198
free(array)
void fr_isaac(fr_randctx *ctx)
Definition: isaac.c:46
int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx)
Delete a post-event callback from the event list.
Definition: event.c:2335
void fr_event_service(fr_event_list_t *el)
Service any outstanding timer or file descriptor events.
Definition: event.c:2549
fr_dlist_head_t ev_to_add
dlist of events to add
Definition: event.c:440
static int _event_timer_free(fr_event_timer_t *ev)
Remove an event from the event loop.
Definition: event.c:1396
fr_event_list_t * fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
Initialise a new event list.
Definition: event.c:2899
static fr_event_func_map_t filter_maps[]
Definition: event.c:172
static int8_t fr_event_timer_cmp(void const *a, void const *b)
Compare two timer events to see which one should occur first.
Definition: event.c:576
static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
Discover the type of a file descriptor.
Definition: event.c:811
fr_dlist_t entry
List of deferred timer events.
Definition: event.c:114
fr_event_func_map_entry_t * func_to_ev
Function -> Event maps coalesced, out of order.
Definition: event.c:168
fr_event_error_cb_t error
Callback for when an error occurs on the FD.
Definition: event.c:310
char const * file
Source file this event was last updated in.
Definition: event.c:352
static int8_t fr_event_fd_cmp(void const *one, void const *two)
Compare two file descriptor handles.
Definition: event.c:589
fr_event_pid_cb_t callback
callback to run when the child exits
Definition: event.c:368
fr_event_funcs_t stored
Stored (set, but inactive) filter functions.
Definition: event.c:308
static ssize_t fr_event_build_evset(UNUSED fr_event_list_t *el, struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active, fr_event_fd_t *ef, fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
Build a new evset based on function pointers present.
Definition: event.c:678
fr_dlist_head_t pid_to_reap
A list of all orphaned child processes we're waiting to reap.
Definition: event.c:431
bool is_registered
Whether this fr_event_fd_t's FD has been registered with kevent.
Definition: event.c:314
fr_rb_tree_t * fds
Tree used to track FDs with filters in kqueue.
Definition: event.c:413
char const * file
Source file this event was last updated in.
Definition: event.c:327
fr_time_t fr_event_list_time(fr_event_list_t *el)
Get the current server time according to the event list.
Definition: event.c:643
int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Delete a pre-event callback from the event list.
Definition: event.c:2281
fr_time_t fr_event_timer_when(fr_event_timer_t const *ev)
Internal timestamp representing when the timer should fire.
Definition: event.c:1633
void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func)
Override event list time source.
Definition: event.c:2971
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:333
static void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
Saves some boilerplate...
Definition: event.c:1917
int line
Line this event was last updated on.
Definition: event.c:386
static int _event_fd_delete(fr_event_fd_t *ef)
Remove a file descriptor from the event loop and rbtree but don't explicitly free it.
Definition: event.c:868
int _fr_event_pid_reap(NDEBUG_LOCATION_ARGS fr_event_list_t *el, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Asynchronously wait for a PID to exit, then reap it.
Definition: event.c:1968
fr_event_filter_t filter
Definition: event.c:300
fr_dlist_head_t pre_callbacks
callbacks when we may be idle...
Definition: event.c:428
#define FR_EVENT_FD_PCAP
Definition: event.c:145
void * uctx
Context pointer to pass to each file descriptor callback.
Definition: event.c:369
fr_event_status_cb_t callback
The callback to call.
Definition: event.c:395
fr_event_timer_cb_t callback
The callback to call.
Definition: event.c:404
static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
Does the actual reaping of PIDs.
Definition: event.c:1925
int line
Line this event was last updated on.
Definition: event.c:120
int line
Line this event was last updated on.
Definition: event.c:353
static size_t kevent_filter_table_len
Definition: event.c:93
fr_dlist_head_t post_callbacks
post-processing callbacks
Definition: event.c:429
int num_fd_events
Number of events in this event list.
Definition: event.c:424
fr_event_timer_cb_t callback
Callback to execute when the timer fires.
Definition: event.c:105
int _fr_event_user_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p, bool trigger, fr_event_user_cb_t callback, void *uctx)
Add a user callback to the event list.
Definition: event.c:2187
fr_event_fd_type_t type
Type of events we're interested in.
Definition: event.c:303
static fr_table_num_sorted_t const fr_event_fd_type_table[]
Definition: event.c:283
static size_t fr_event_fd_type_table_len
Definition: event.c:289
int _fr_event_timer_in(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p, fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx)
Insert a timer event into an event list.
Definition: event.c:1596
uint16_t flags
Flags to use for inserting event.
Definition: event.c:159
waitpid(reap->pid_ev->pid, &status, 0)
fr_event_pid_cb_t callback
callback to run when the child exits
Definition: event.c:341
static int _event_list_free(fr_event_list_t *el)
Cleanup an event list.
Definition: event.c:2794
bool coalesce
Coalesce this map with the next.
Definition: event.c:162
fr_dlist_t entry
Entry in free list.
Definition: event.c:320
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
Gather outstanding timer and file descriptor events.
Definition: event.c:2414
static int _event_free_indexes(UNUSED void *uctx)
Free any memory we allocated for indexes.
Definition: event.c:2812
fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
Returns the appropriate callback function for a given event.
Definition: event.c:1318
void * uctx
Context for the callback.
Definition: event.c:405
bool is_registered
Whether this user event has been registered with the event loop.
Definition: event.c:378
void const * uctx
Context pointer to pass to the callback.
Definition: event.c:106
return processed
Definition: event.c:2129
int type
Type this filter applies to.
Definition: event.c:161
uint64_t fr_event_list_num_timers(fr_event_list_t *el)
Return the number of timer events currently scheduled.
Definition: event.c:613
fr_event_func_map_t const * map
Function map between fr_event_funcs_t and kevent filters.
Definition: event.c:312
void * uctx
Context for the callback.
Definition: event.c:396
int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Insert a PID event into an event list.
Definition: event.c:1741
char const * name
Name of the event.
Definition: event.c:157
int fr_event_user_trigger(fr_event_list_t *el, fr_event_user_t *ev)
Trigger a user event.
Definition: event.c:2233
char const * file
Source file this event was last updated in.
Definition: event.c:119
void * fr_event_fd_uctx(fr_event_fd_t *ef)
Returns the uctx associated with an fr_event_fd_t handle.
Definition: event.c:1326
int line
Line this event was last updated on.
Definition: event.c:328
uintptr_t armour
protection flag from being deleted.
Definition: event.c:323
int kq
instance associated with this event list.
Definition: event.c:426
fr_event_user_cb_t callback
The callback to call.
Definition: event.c:381
talloc_free(reap)
int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Unarmour an FD.
Definition: event.c:1372
int sock_type
The type of socket SOCK_STREAM, SOCK_RAW etc...
Definition: event.c:305
uint64_t fr_event_list_num_fds(fr_event_list_t *el)
Return the number of file descriptors is_registered with this event loop.
Definition: event.c:601
void * uctx
Context pointer to pass to each file descriptor callback.
Definition: event.c:317
fr_event_func_idx_type_t idx_type
What type of index we use for event to function mapping.
Definition: event.c:166
fr_event_timer_t const ** parent
A pointer to the parent structure containing the timer event.
Definition: event.c:110
#define GET_FUNC(_ef, _offset)
static fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
Figure out which function to call given a kevent.
Definition: event.c:529
static int _fr_event_reap_free(fr_event_pid_reap_t *reap)
Definition: event.c:1938
pid_t pid
child to wait for
Definition: event.c:338
bool in_handler
Deletes should be deferred until after the handlers complete.
Definition: event.c:436
static void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
Evaluate a EVFILT_PROC event.
Definition: event.c:1663
int fr_event_list_kq(fr_event_list_t *el)
Return the kq associated with an event list.
Definition: event.c:625
fr_time_t now
The last time the event list was serviced.
Definition: event.c:421
#define fr_time()
Definition: event.c:60
void * uctx
Context for the callback.
Definition: event.c:382
bool is_registered
Whether this user event has been registered with the event loop.
Definition: event.c:335
int fr_event_timer_delete(fr_event_timer_t const **ev_p)
Delete a timer event from the event list.
Definition: event.c:1611
int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx)
Add a post-event callback to the event list.
Definition: event.c:2313
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:376
struct fr_event_pid::@129 early_exit
Fields that are only used if we're being triggered by a user event.
bool fr_event_list_empty(fr_event_list_t *el)
Return whether the event loop has any active events.
Definition: event.c:2979
static int _event_build_indexes(UNUSED void *uctx)
Definition: event.c:2820
fr_lst_index_t lst_id
Where to store opaque lst data.
Definition: event.c:113
unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal)
Send a signal to all the processes we have in our reap list, and reap them.
Definition: event.c:2004
int16_t filter
Filter to apply.
Definition: event.c:158
static void event_fd_func_index_build(fr_event_func_map_t *map)
Definition: event.c:447
static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
Placeholder callback to avoid branches in service loop.
Definition: event.c:657
fr_dlist_t entry
If the fr_event_pid is in the detached, reap state, it's inserted into a list associated with the eve...
Definition: event.c:363
bool fr_event_loop_exiting(fr_event_list_t *el)
Check to see whether the event loop is in the process of exiting.
Definition: event.c:2755
fr_dlist_t entry
Linked list of callback.
Definition: event.c:394
int _fr_event_filter_update(NDEBUG_LOCATION_ARGS fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
Suspend/resume a subset of filters.
Definition: event.c:1000
char const * file
Source file this event was last updated in.
Definition: event.c:385
int _fr_event_fd_move(NDEBUG_LOCATION_ARGS fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
Move a file descriptor event from one event list to another.
Definition: event.c:949
fr_event_func_map_entry_t ** ev_to_func
Function -> Event maps in index order.
Definition: event.c:169
int _fr_event_fd_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_fd_cb_t read_fn, fr_event_fd_cb_t write_fn, fr_event_error_cb_t error, void *uctx)
Associate I/O callbacks with a file descriptor.
Definition: event.c:1233
fr_event_fd_type_t
Definition: event.c:124
@ FR_EVENT_FD_FILE
is a file.
Definition: event.c:126
@ FR_EVENT_FD_DIRECTORY
is a directory.
Definition: event.c:127
@ FR_EVENT_FD_SOCKET
is a socket.
Definition: event.c:125
fr_event_pid_t const * pid_ev
pid_ev this reaper is bound to.
Definition: event.c:361
fr_event_funcs_t active
Active filter functions.
Definition: event.c:307
int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Add a pre-event callback to the event list.
Definition: event.c:2259
int fr_event_timer_run(fr_event_list_t *el, fr_time_t *when)
Run a single scheduled timer event.
Definition: event.c:2363
static void _fr_event_pid_early_exit(fr_event_list_t *el, void *uctx)
Called on the next loop through the event loop when inserting an EVFILT_PROC event fails.
Definition: event.c:1710
static void event_user_eval(fr_event_list_t *el, struct kevent *kev)
Definition: event.c:2159
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:299
static fr_table_num_sorted_t const kevent_filter_table[]
Definition: event.c:76
TALLOC_CTX * linked_ctx
talloc ctx this event was bound to.
Definition: event.c:318
static void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
Definition: event.c:2536
void fr_event_loop_exit(fr_event_list_t *el, int code)
Signal an event loop exit with the specified code.
Definition: event.c:2744
#define FR_EV_BATCH_FDS
Definition: event.c:57
void * uctx
Context pointer to pass to each file descriptor callback.
Definition: event.c:342
static int _event_pid_free(fr_event_pid_t *ev)
Remove PID wait event from kevent if the fr_event_pid_t is freed.
Definition: event.c:1643
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:360
int fd
File descriptor we're listening for events on.
Definition: event.c:301
size_t offset
Offset of function pointer in structure.
Definition: event.c:156
int will_exit
Will exit on next call to fr_event_corral.
Definition: event.c:415
int fr_event_fd_delete(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Remove a file descriptor from the event loop.
Definition: event.c:1260
fr_dlist_t entry
Linked list of callback.
Definition: event.c:403
int fr_event_loop(fr_event_list_t *el)
Run an event loop.
Definition: event.c:2766
#define EVENT_DEBUG(...)
Definition: event.c:73
int _fr_event_timer_at(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p, fr_time_t when, fr_event_timer_cb_t callback, void const *uctx)
Insert a timer event into an event list.
Definition: event.c:1451
int exit
If non-zero event loop will prevent the addition of new events, and will return immediately from the ...
Definition: event.c:416
fr_rb_node_t node
Entry in the tree of file descriptor handles.
Definition: event.c:295
fr_event_time_source_t time
Where our time comes from.
Definition: event.c:420
fr_dlist_head_t fd_to_free
File descriptor events pending deletion.
Definition: event.c:439
bool dispatch
Whether the event list is currently dispatching events.
Definition: event.c:422
struct kevent events[FR_EV_BATCH_FDS]
Definition: event.c:434
int _fr_event_filter_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_filter_t filter, void *funcs, fr_event_error_cb_t error, void *uctx)
Insert a filter for the specified fd.
Definition: event.c:1077
#define NOTE_EXITSTATUS
TALLOC_CTX * linked_ctx
talloc ctx this event was bound to.
Definition: event.c:108
fr_event_fd_t * fr_event_fd_handle(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Get the opaque event handle from a file descriptor.
Definition: event.c:1296
fr_event_pid_t const ** parent
Definition: event.c:339
fr_lst_t * times
of timer events to be executed.
Definition: event.c:412
static int _event_user_delete(fr_event_user_t *ev)
Memory will not be freed if we fail to remove the event from the kqueue.
Definition: event.c:2141
fr_time_t when
When this timer should fire.
Definition: event.c:103
fr_event_func_idx_type_t
Definition: event.c:134
@ FR_EVENT_FUNC_IDX_FILTER
Sign flip is performed i.e. -1 = 0The filter is used / as the index in the ev to func index.
Definition: event.c:137
@ FR_EVENT_FUNC_IDX_NONE
Definition: event.c:135
@ FR_EVENT_FUNC_IDX_FFLAGS
The bit position of the flags in FFLAGS is used to provide the index.
Definition: event.c:139
int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Armour an FD.
Definition: event.c:1342
fr_event_list_t * el
Event list containing this timer.
Definition: event.c:116
uint32_t fflags
fflags to pass to filter.
Definition: event.c:160
A file descriptor/filter event.
Definition: event.c:294
Specifies a mapping between a function pointer in a structure and its respective event.
Definition: event.c:155
Stores all information relating to an event list.
Definition: event.c:411
Hold additional information for automatically reaped PIDs.
Definition: event.c:359
Callbacks to perform after all timers and FDs have been checked.
Definition: event.c:402
Callbacks to perform when the event handler is about to check the events.
Definition: event.c:393
A timer event.
Definition: event.c:102
Callbacks for kevent() user events.
Definition: event.c:375
int fr_debug_lvl
Definition: log.c:43
fr_log_t default_log
Definition: log.c:291
void fr_vlog(fr_log_t const *log, fr_log_type_t type, char const *file, int line, char const *fmt, va_list ap)
Send a server log message to its destination.
Definition: log.c:344
@ L_DBG_LVL_3
3rd highest priority debug messages (-xxx | -Xx).
Definition: log.h:72
@ L_DBG
Only displayed when debugging is enabled.
Definition: log.h:59
int fr_lst_extract(fr_lst_t *lst, void *data)
Remove an element from an LST.
Definition: lst.c:715
void * fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter)
Get the next entry in an LST.
Definition: lst.c:785
int fr_lst_insert(fr_lst_t *lst, void *data)
Definition: lst.c:731
unsigned int fr_lst_num_elements(fr_lst_t *lst)
Definition: lst.c:750
void * fr_lst_peek(fr_lst_t *lst)
Definition: lst.c:701
void * fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter)
Iterate over entries in LST.
Definition: lst.c:766
Definition: lst.c:60
#define fr_lst_talloc_alloc(_ctx, _cmp, _talloc_type, _field, _init)
Creates an LST that verifies elements are of a specific talloc type.
Definition: lst.h:80
fr_lst_index_t fr_lst_iter_t
Definition: lst.h:45
unsigned int fr_lst_index_t
Definition: lst.h:43
static uint8_t fr_high_bit_pos(uint64_t num)
Find the highest order high bit in an unsigned 64 bit integer.
Definition: math.h:36
unsigned short uint16_t
Definition: merged_model.c:31
unsigned int uint32_t
Definition: merged_model.c:33
long int ssize_t
Definition: merged_model.c:24
unsigned char uint8_t
Definition: merged_model.c:30
unsigned long int size_t
Definition: merged_model.c:25
static size_t array[MY_ARRAY_SIZE]
void fr_rand_init(void)
Definition: rand.c:34
uint32_t randrsl[256]
Definition: rand.h:40
uint32_t randcnt
Definition: rand.h:39
void * fr_rb_iter_next_inorder(fr_rb_iter_inorder_t *iter)
Return the next node.
Definition: rb.c:850
void * fr_rb_iter_init_inorder(fr_rb_iter_inorder_t *iter, fr_rb_tree_t *tree)
Initialise an in-order iterator.
Definition: rb.c:824
uint32_t fr_rb_num_elements(fr_rb_tree_t *tree)
#define fr_rb_inline_talloc_alloc(_ctx, _type, _field, _data_cmp, _data_free)
Allocs a red black that verifies elements are of a specific talloc type.
Definition: rb.h:246
#define fr_rb_inline_alloc(_ctx, _type, _field, _data_cmp, _data_free)
Allocs a red black tree.
Definition: rb.h:271
bool fr_rb_insert(fr_rb_tree_t *tree, void const *data)
bool fr_rb_delete(fr_rb_tree_t *tree, void const *data)
void * fr_rb_find(fr_rb_tree_t const *tree, void const *data)
Iterator structure for in-order traversal of an rbtree.
Definition: rb.h:321
The main red black tree structure.
Definition: rb.h:73
return count
Definition: module.c:163
fr_assert(0)
char const * fr_syserror(int num)
Guaranteed to be thread-safe version of strerror.
Definition: syserror.c:243
#define fr_table_str_by_value(_table, _number, _def)
Convert an integer to a string.
Definition: table.h:772
An element in a lexicographically sorted array of name to num mappings.
Definition: table.h:49
int talloc_link_ctx(TALLOC_CTX *parent, TALLOC_CTX *child)
Link two different parent and child contexts, so the child is freed before the parent.
Definition: talloc.c:171
static TALLOC_CTX * talloc_init_const(char const *name)
Allocate a top level chunk with a constant name.
Definition: talloc.h:112
#define fr_time_delta_to_timespec(_delta)
Convert a delta to a timespec.
Definition: time.h:666
static int8_t fr_time_delta_cmp(fr_time_delta_t a, fr_time_delta_t b)
Compare two fr_time_delta_t values.
Definition: time.h:930
static int64_t fr_time_unwrap(fr_time_t time)
Definition: time.h:146
static fr_time_delta_t fr_time_delta_from_sec(int64_t sec)
Definition: time.h:590
#define fr_time_delta_wrap(_time)
Definition: time.h:152
#define fr_time_wrap(_time)
Definition: time.h:145
#define fr_time_lteq(_a, _b)
Definition: time.h:240
#define fr_time_delta_ispos(_a)
Definition: time.h:290
static int64_t fr_time_to_usec(fr_time_t when)
Convert an fr_time_t (internal time) to number of usec since the unix epoch (wallclock time)
Definition: time.h:701
#define fr_time_add(_a, _b)
Add a time/time delta together.
Definition: time.h:196
#define fr_time_gt(_a, _b)
Definition: time.h:237
#define USEC
Definition: time.h:380
#define fr_time_sub(_a, _b)
Subtract one time from another.
Definition: time.h:229
static int8_t fr_time_cmp(fr_time_t a, fr_time_t b)
Compare two fr_time_t values.
Definition: time.h:916
A time delta, a difference in time measured in nanoseconds.
Definition: time.h:80
"server local" time.
Definition: time.h:69
close(uq->fd)
static fr_event_list_t * el
void fr_strerror_clear(void)
Clears all pending messages from the talloc pools.
Definition: strerror.c:577
char const * fr_strerror(void)
Get the last library error.
Definition: strerror.c:554
#define fr_strerror_printf(_fmt,...)
Log to thread local error buffer.
Definition: strerror.h:64
#define fr_strerror_printf_push(_fmt,...)
Add a message to an existing stack of messages at the tail.
Definition: strerror.h:84
#define fr_strerror_const_push(_msg)
Definition: strerror.h:227
#define fr_strerror_const(_msg)
Definition: strerror.h:223
int nonnull(2, 5))
int format(printf, 5, 0))
static size_t char ** out
Definition: value.h:997