The FreeRADIUS server  $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
event.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 2 of the License, or
5  * (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15  */
16 
17 /** Wrapper around libkqueue to make managing events easier
18  *
19  * Non-thread-safe event handling specific to FreeRADIUS.
20  *
21  * By non-thread-safe we mean multiple threads can't insert/delete
22  * events concurrently into the same event list without synchronization.
23  *
24  * @file src/lib/util/event.c
25  *
26  * @copyright 2007-2016 The FreeRADIUS server project
27  * @copyright 2016 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
28  * @copyright 2007 Alan DeKok (aland@freeradius.org)
29  */
30 RCSID("$Id: 085eba311957ede61b99cd5f829ae90d9596c731 $")
31 
32 #include <freeradius-devel/util/dlist.h>
33 #include <freeradius-devel/util/event.h>
34 #include <freeradius-devel/util/lst.h>
35 #include <freeradius-devel/util/log.h>
36 #include <freeradius-devel/util/rb.h>
37 #include <freeradius-devel/util/strerror.h>
38 #include <freeradius-devel/util/syserror.h>
39 #include <freeradius-devel/util/table.h>
40 #include <freeradius-devel/util/token.h>
41 #include <freeradius-devel/util/atexit.h>
42 
43 #include <sys/stat.h>
44 #include <sys/wait.h>
45 #include <pthread.h>
46 
47 #ifdef NDEBUG
48 /*
49  * Turn off documentation warnings as file/line
50  * args aren't used for non-debug builds.
51  */
53 DIAG_OFF(documentation)
55 #endif
56 
57 #define FR_EV_BATCH_FDS (256)
58 
59 DIAG_OFF(unused-macros)
60 #define fr_time() static_assert(0, "Use el->time for event loop timing")
61 DIAG_ON(unused-macros)
62 
63 #if !defined(SO_GET_FILTER) && defined(SO_ATTACH_FILTER)
64 # define SO_GET_FILTER SO_ATTACH_FILTER
65 #endif
66 
67 #ifdef WITH_EVENT_DEBUG
68 # define EVENT_DEBUG(fmt, ...) printf("EVENT:");printf(fmt, ## __VA_ARGS__);printf("\n");
69 # ifndef EVENT_REPORT_FREQ
70 # define EVENT_REPORT_FREQ 5
71 # endif
72 #else
73 # define EVENT_DEBUG(...)
74 #endif
75 
77 #ifdef EVFILT_AIO
78  { L("EVFILT_AIO"), EVFILT_AIO },
79 #endif
80 #ifdef EVFILT_EXCEPT
81  { L("EVFILT_EXCEPT"), EVFILT_EXCEPT },
82 #endif
83 #ifdef EVFILT_MACHPORT
84  { L("EVFILT_MACHPORT"), EVFILT_MACHPORT },
85 #endif
86  { L("EVFILT_PROC"), EVFILT_PROC },
87  { L("EVFILT_READ"), EVFILT_READ },
88  { L("EVFILT_SIGNAL"), EVFILT_SIGNAL },
89  { L("EVFILT_TIMER"), EVFILT_TIMER },
90  { L("EVFILT_VNODE"), EVFILT_VNODE },
91  { L("EVFILT_WRITE"), EVFILT_WRITE }
92 };
94 
95 #ifdef EVFILT_LIBKQUEUE
96 static int log_conf_kq;
97 #endif
98 
99 /** A timer event
100  *
101  */
103  fr_time_t when; //!< When this timer should fire.
104 
105  fr_event_timer_cb_t callback; //!< Callback to execute when the timer fires.
106  void const *uctx; //!< Context pointer to pass to the callback.
107 
108  TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
109 
110  fr_event_timer_t const **parent; //!< A pointer to the parent structure containing the timer
111  ///< event.
112 
113  fr_lst_index_t lst_id; //!< Where to store opaque lst data.
114  fr_dlist_t entry; //!< List of deferred timer events.
115 
116  fr_event_list_t *el; //!< Event list containing this timer.
117 
118 #ifndef NDEBUG
119  char const *file; //!< Source file this event was last updated in.
120  int line; //!< Line this event was last updated on.
121 #endif
122 };
123 
124 typedef enum {
125  FR_EVENT_FD_SOCKET = 1, //!< is a socket.
126  FR_EVENT_FD_FILE = 2, //!< is a file.
127  FR_EVENT_FD_DIRECTORY = 4, //!< is a directory.
128 
129 #ifdef SO_GET_FILTER
130  FR_EVENT_FD_PCAP = 8,
131 #endif
133 
134 typedef enum {
136 
137  FR_EVENT_FUNC_IDX_FILTER, //!< Sign flip is performed i.e. -1 = 0The filter is used
138  //// as the index in the ev to func index.
139  FR_EVENT_FUNC_IDX_FFLAGS //!< The bit position of the flags in FFLAGS
140  ///< is used to provide the index.
141  ///< i.e. 0x01 -> 0, 0x02 -> 1, 0x08 -> 3 etc..
143 
144 #ifndef SO_GET_FILTER
145 # define FR_EVENT_FD_PCAP 0
146 #endif
147 
148 /** Specifies a mapping between a function pointer in a structure and its respective event
149  *
150  * If the function pointer at the specified offset is set, then a matching event
151  * will be added.
152  *
153  * If the function pointer is NULL, then any existing events will be removed.
154  */
155 typedef struct {
156  size_t offset; //!< Offset of function pointer in structure.
157  char const *name; //!< Name of the event.
158  int16_t filter; //!< Filter to apply.
159  uint16_t flags; //!< Flags to use for inserting event.
160  uint32_t fflags; //!< fflags to pass to filter.
161  int type; //!< Type this filter applies to.
162  bool coalesce; //!< Coalesce this map with the next.
164 
165 typedef struct {
166  fr_event_func_idx_type_t idx_type; //!< What type of index we use for
167  ///< event to function mapping.
168  fr_event_func_map_entry_t *func_to_ev; //!< Function -> Event maps coalesced, out of order.
169  fr_event_func_map_entry_t **ev_to_func; //!< Function -> Event maps in index order.
171 
173  [FR_EVENT_FILTER_IO] = {
175  .func_to_ev = (fr_event_func_map_entry_t[]){
176  {
177  .offset = offsetof(fr_event_io_func_t, read),
178  .name = "read",
179  .filter = EVFILT_READ,
180  .flags = EV_ADD | EV_ENABLE,
181 #ifdef NOTE_NONE
182  .fflags = NOTE_NONE,
183 #else
184  .fflags = 0,
185 #endif
187  },
188  {
189  .offset = offsetof(fr_event_io_func_t, write),
190  .name = "write",
191  .filter = EVFILT_WRITE,
192  .flags = EV_ADD | EV_ENABLE,
193  .fflags = 0,
195  },
196  { 0 }
197  }
198  },
200  .idx_type = FR_EVENT_FUNC_IDX_FFLAGS,
201  .func_to_ev = (fr_event_func_map_entry_t[]){
202  {
203  .offset = offsetof(fr_event_vnode_func_t, delete),
204  .name = "delete",
205  .filter = EVFILT_VNODE,
206  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
207  .fflags = NOTE_DELETE,
209  .coalesce = true
210  },
211  {
212  .offset = offsetof(fr_event_vnode_func_t, write),
213  .name = "write",
214  .filter = EVFILT_VNODE,
215  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
216  .fflags = NOTE_WRITE,
217  .type = FR_EVENT_FD_FILE,
218  .coalesce = true
219  },
220  {
221  .offset = offsetof(fr_event_vnode_func_t, extend),
222  .name = "extend",
223  .filter = EVFILT_VNODE,
224  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
225  .fflags = NOTE_EXTEND,
227  .coalesce = true
228  },
229  {
230  .offset = offsetof(fr_event_vnode_func_t, attrib),
231  .name = "attrib",
232  .filter = EVFILT_VNODE,
233  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
234  .fflags = NOTE_ATTRIB,
235  .type = FR_EVENT_FD_FILE,
236  .coalesce = true
237  },
238  {
239  .offset = offsetof(fr_event_vnode_func_t, link),
240  .name = "link",
241  .filter = EVFILT_VNODE,
242  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
243  .fflags = NOTE_LINK,
244  .type = FR_EVENT_FD_FILE,
245  .coalesce = true
246  },
247  {
248  .offset = offsetof(fr_event_vnode_func_t, rename),
249  .name = "rename",
250  .filter = EVFILT_VNODE,
251  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
252  .fflags = NOTE_RENAME,
253  .type = FR_EVENT_FD_FILE,
254  .coalesce = true
255  },
256 #ifdef NOTE_REVOKE
257  {
258  .offset = offsetof(fr_event_vnode_func_t, revoke),
259  .name = "revoke",
260  .filter = EVFILT_VNODE,
261  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
262  .fflags = NOTE_REVOKE,
263  .type = FR_EVENT_FD_FILE,
264  .coalesce = true
265  },
266 #endif
267 #ifdef NOTE_FUNLOCK
268  {
269  .offset = offsetof(fr_event_vnode_func_t, funlock),
270  .name = "funlock",
271  .filter = EVFILT_VNODE,
272  .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
273  .fflags = NOTE_FUNLOCK,
274  .type = FR_EVENT_FD_FILE,
275  .coalesce = true
276  },
277 #endif
278  { 0 }
279  }
280  }
281 };
282 
284  { L("directory"), FR_EVENT_FD_DIRECTORY },
285  { L("file"), FR_EVENT_FD_FILE },
286  { L("pcap"), FR_EVENT_FD_PCAP },
287  { L("socket"), FR_EVENT_FD_SOCKET }
288 };
290 
291 /** A file descriptor/filter event
292  *
293  */
294 struct fr_event_fd {
295  fr_rb_node_t node; //!< Entry in the tree of file descriptor handles.
296  ///< this should really go away and we should pass around
297  ///< handles directly.
298 
299  fr_event_list_t *el; //!< Event list this event belongs to.
301  int fd; //!< File descriptor we're listening for events on.
302 
303  fr_event_fd_type_t type; //!< Type of events we're interested in.
304 
305  int sock_type; //!< The type of socket SOCK_STREAM, SOCK_RAW etc...
306 
307  fr_event_funcs_t active; //!< Active filter functions.
308  fr_event_funcs_t stored; //!< Stored (set, but inactive) filter functions.
309 
310  fr_event_error_cb_t error; //!< Callback for when an error occurs on the FD.
311 
312  fr_event_func_map_t const *map; //!< Function map between #fr_event_funcs_t and kevent filters.
313 
314  bool is_registered; //!< Whether this fr_event_fd_t's FD has been registered with
315  ///< kevent. Mostly for debugging.
316 
317  void *uctx; //!< Context pointer to pass to each file descriptor callback.
318  TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
319 
320  fr_dlist_t entry; //!< Entry in free list.
321 
322 #ifndef NDEBUG
323  uintptr_t armour; //!< protection flag from being deleted.
324 #endif
325 
326 #ifndef NDEBUG
327  char const *file; //!< Source file this event was last updated in.
328  int line; //!< Line this event was last updated on.
329 #endif
330 };
331 
332 struct fr_event_pid {
333  fr_event_list_t *el; //!< Event list this event belongs to.
334 
335  bool is_registered; //!< Whether this user event has been registered
336  ///< with the event loop.
337 
338  pid_t pid; //!< child to wait for
340 
341  fr_event_pid_cb_t callback; //!< callback to run when the child exits
342  void *uctx; //!< Context pointer to pass to each file descriptor callback.
343 
344  /** Fields that are only used if we're being triggered by a user event
345  */
346  struct {
347  fr_event_user_t *ev; //!< Fallback user event we use to raise a PID event when
348  ///< a race occurs with kevent.
349  int status; //!< Status we got from waitid.
351 #ifndef NDEBUG
352  char const *file; //!< Source file this event was last updated in.
353  int line; //!< Line this event was last updated on.
354 #endif
355 };
356 
357 /** Hold additional information for automatically reaped PIDs
358  */
359 typedef struct {
360  fr_event_list_t *el; //!< Event list this event belongs to.
361  fr_event_pid_t const *pid_ev; //!< pid_ev this reaper is bound to.
362 
363  fr_dlist_t entry; //!< If the fr_event_pid is in the detached, reap state,
364  ///< it's inserted into a list associated with the event.
365  //!< We then send SIGKILL, and forcefully reap the process
366  ///< on exit.
367 
368  fr_event_pid_cb_t callback; //!< callback to run when the child exits
369  void *uctx; //!< Context pointer to pass to each file descriptor callback.
371 
372 /** Callbacks for kevent() user events
373  *
374  */
376  fr_event_list_t *el; //!< Event list this event belongs to.
377 
378  bool is_registered; //!< Whether this user event has been registered
379  ///< with the event loop.
380 
381  fr_event_user_cb_t callback; //!< The callback to call.
382  void *uctx; //!< Context for the callback.
383 
384 #ifndef NDEBUG
385  char const *file; //!< Source file this event was last updated in.
386  int line; //!< Line this event was last updated on.
387 #endif
388 };
389 
390 /** Callbacks to perform when the event handler is about to check the events
391  *
392  */
393 typedef struct {
394  fr_dlist_t entry; //!< Linked list of callback.
395  fr_event_status_cb_t callback; //!< The callback to call.
396  void *uctx; //!< Context for the callback.
398 
399 /** Callbacks to perform after all timers and FDs have been checked
400  *
401  */
402 typedef struct {
403  fr_dlist_t entry; //!< Linked list of callback.
404  fr_event_timer_cb_t callback; //!< The callback to call.
405  void *uctx; //!< Context for the callback.
407 
408 /** Stores all information relating to an event list
409  *
410  */
412  fr_lst_t *times; //!< of timer events to be executed.
413  fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue.
414 
415  int will_exit; //!< Will exit on next call to fr_event_corral.
416  int exit; //!< If non-zero event loop will prevent the addition
417  ///< of new events, and will return immediately
418  ///< from the corral/service function.
419 
420  fr_event_time_source_t time; //!< Where our time comes from.
421  fr_time_t now; //!< The last time the event list was serviced.
422  bool dispatch; //!< Whether the event list is currently dispatching events.
423 
424  int num_fd_events; //!< Number of events in this event list.
425 
426  int kq; //!< instance associated with this event list.
427 
428  fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle...
429  fr_dlist_head_t post_callbacks; //!< post-processing callbacks
430 
431  fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're
432  ///< waiting to reap.
433 
434  struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */
435 
436  bool in_handler; //!< Deletes should be deferred until after the
437  ///< handlers complete.
438 
439  fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion.
440  fr_dlist_head_t ev_to_add; //!< dlist of events to add
441 
442 #ifdef WITH_EVENT_DEBUG
443  fr_event_timer_t const *report; //!< Report event.
444 #endif
445 };
446 
448 {
449  switch (map->idx_type) {
450  default:
451  return;
452 
453  /*
454  * - Figure out the lowest filter value
455  * - Invert it
456  * - Allocate an array
457  * - Populate the array
458  */
460  {
461  int low = 0;
463 
464  for (entry = map->func_to_ev; entry->name; entry++) if (entry->filter < low) low = entry->filter;
465 
466  map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, ~low + 1);
467  if (unlikely(!map->ev_to_func)) abort();
468 
469  for (entry = map->func_to_ev; entry->name; entry++) map->ev_to_func[~entry->filter] = entry;
470  }
471  break;
472 
473  /*
474  * - Figure out the highest bit position
475  * - Allocate an array
476  * - Populate the array
477  */
479  {
480  uint8_t high = 0, pos;
482 
483  for (entry = map->func_to_ev; entry->name; entry++) {
484  pos = fr_high_bit_pos(entry->fflags);
485  if (pos > high) high = pos;
486  }
487 
488  map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, high);
489  if (unlikely(!map->ev_to_func)) abort();
490 
491  for (entry = map->func_to_ev; entry->name; entry++) {
492  int fflags = entry->fflags;
493 
494  /*
495  * Multiple notes can be associated
496  * with the same function.
497  */
498  while ((pos = fr_high_bit_pos(fflags))) {
499  pos -= 1;
500  map->ev_to_func[pos] = entry;
501  fflags &= ~(1 << pos);
502  }
503  }
504  }
505  break;
506  }
507 }
508 
509 /** Figure out which function to call given a kevent
510  *
511  * This function should be called in a loop until it returns NULL.
512  *
513  * @param[in] ef File descriptor state handle.
514  * @param[in] filter from the kevent.
515  * @param[in,out] fflags from the kevent. Each call will return the function
516  * from the next most significant NOTE_*, with each
517  * NOTE_* before unset from fflags.
518  * @return
519  * - NULL there are no more callbacks to call.
520  * - The next callback to call.
521  */
522 static inline CC_HINT(always_inline) fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
523 {
524  fr_event_func_map_t const *map = ef->map;
525 
526 #define GET_FUNC(_ef, _offset) *((fr_event_fd_cb_t const *)((uint8_t const *)&(_ef)->active + _offset))
527 
528  switch (map->idx_type) {
529  default:
530  fr_assert_fail("Invalid index type %i", map->idx_type);
531  return NULL;
532 
534  {
535  int idx;
536 
537  if (!*filter) return NULL;
538 
539  idx = ~*filter; /* Consume the filter */
540  *filter = 0;
541 
542  return GET_FUNC(ef, map->ev_to_func[idx]->offset);
543  }
544 
546  {
547  int our_fflags = *fflags;
548  uint8_t pos = fr_high_bit_pos(our_fflags);
549 
550  if (!pos) return NULL; /* No more fflags to consume */
551  pos -= 1; /* Saves an array element */
552 
553  *fflags = our_fflags & ~(1 << pos); /* Consume the knote */
554 
555  return GET_FUNC(ef, map->ev_to_func[pos]->offset);
556  }
557  }
558 }
559 
560 /** Compare two timer events to see which one should occur first
561  *
562  * @param[in] a the first timer event.
563  * @param[in] b the second timer event.
564  * @return
565  * - +1 if a should occur later than b.
566  * - -1 if a should occur earlier than b.
567  * - 0 if both events occur at the same time.
568  */
569 static int8_t fr_event_timer_cmp(void const *a, void const *b)
570 {
571  fr_event_timer_t const *ev_a = a, *ev_b = b;
572 
573  return fr_time_cmp(ev_a->when, ev_b->when);
574 }
575 
576 /** Compare two file descriptor handles
577  *
578  * @param[in] one the first file descriptor handle.
579  * @param[in] two the second file descriptor handle.
580  * @return CMP(one, two)
581  */
582 static int8_t fr_event_fd_cmp(void const *one, void const *two)
583 {
584  fr_event_fd_t const *a = one, *b = two;
585 
586  CMP_RETURN(a, b, fd);
587 
588  return CMP(a->filter, b->filter);
589 }
590 
591 /** Return the number of file descriptors is_registered with this event loop
592  *
593  */
595 {
596  if (unlikely(!el)) return -1;
597 
598  return fr_rb_num_elements(el->fds);
599 }
600 
601 /** Return the number of timer events currently scheduled
602  *
603  * @param[in] el to return timer events for.
604  * @return number of timer events.
605  */
607 {
608  if (unlikely(!el)) return -1;
609 
610  return fr_lst_num_elements(el->times);
611 }
612 
613 /** Return the kq associated with an event list.
614  *
615  * @param[in] el to return timer events for.
616  * @return kq
617  */
619 {
620  if (unlikely(!el)) return -1;
621 
622  return el->kq;
623 }
624 
625 /** Get the current server time according to the event list
626  *
627  * If the event list is currently dispatching events, we return the time
628  * this iteration of the event list started.
629  *
630  * If the event list is not currently dispatching events, we return the
631  * current system time.
632  *
633  * @param[in] el to get time from.
634  * @return the current time according to the event list.
635  */
637 {
638  if (el->dispatch) {
639  return el->now;
640  } else {
641  return el->time();
642  }
643 }
644 
645 /** Placeholder callback to avoid branches in service loop
646  *
647  * This is set in place of any NULL function pointers, so that the event loop doesn't
648  * SEGV if a filter callback function is unset between corral and service.
649  */
650 static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
651 {
652  return;
653 }
654 
655 /** Build a new evset based on function pointers present
656  *
657  * @note The contents of active functions may be inconsistent if this function errors. But the
658  * only time that will occur is if the caller passed invalid arguments.
659  *
660  * @param[in] el we're building events for.
661  * @param[out] out_kev where to write the evset.
662  * @param[in] outlen length of output buffer.
663  * @param[out] active The set of function pointers with active filters.
664  * @param[in] ef event to insert.
665  * @param[in] new Functions to map to filters.
666  * @param[in] prev Previous set of functions mapped to filters.
667  * @return
668  * - >= 0 the number of changes written to out.
669  * - < 0 an error occurred.
670  */
672 #ifndef WITH_EVENT_DEBUG
673  UNUSED
674 #endif
676  struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active,
677  fr_event_fd_t *ef,
678  fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
679 {
680  struct kevent *out = out_kev, *end = out + outlen;
681  fr_event_func_map_entry_t const *map;
682  struct kevent add[10], *add_p = add;
683  size_t i;
684 
685  EVENT_DEBUG("%p - Building new evset for FD %i (new %p, prev %p)", el, ef->fd, new, prev);
686 
687  /*
688  * Iterate over the function map, setting/unsetting
689  * filters and filter flags.
690  */
691  for (map = ef->map->func_to_ev; map->name; map++) {
692  bool has_current_func = false;
693  bool has_prev_func = false;
694  uint32_t current_fflags = 0;
695  uint32_t prev_fflags = 0;
696 
697  do {
698  fr_event_fd_cb_t prev_func;
699  fr_event_fd_cb_t new_func;
700 
701  /*
702  * If the previous value was the 'noop'
703  * callback, it's identical to being unset.
704  */
705  prev_func = *(fr_event_fd_cb_t const *)((uint8_t const *)prev + map->offset);
706  if (prev_func && (prev_func != fr_event_fd_noop)) {
707  EVENT_DEBUG("\t%s prev set (%p)", map->name, prev_func);
708  prev_fflags |= map->fflags;
709  has_prev_func = true;
710  } else {
711  EVENT_DEBUG("\t%s prev unset", map->name);
712  }
713 
714  new_func = *(fr_event_fd_cb_t const *)((uint8_t const *)new + map->offset);
715  if (new_func && (new_func != fr_event_fd_noop)) {
716  EVENT_DEBUG("\t%s curr set (%p)", map->name, new_func);
717  current_fflags |= map->fflags;
718  has_current_func = true;
719 
720  /*
721  * Check the filter will work for the
722  * type of file descriptor specified.
723  */
724  if (!(map->type & ef->type)) {
725  fr_strerror_printf("kevent %s (%s), can't be applied to fd of type %s",
726  map->name,
729  map->type, "<INVALID>"));
730  return -1;
731  }
732 
733  /*
734  * Mark this filter function as active
735  */
736  memcpy((uint8_t *)active + map->offset, (uint8_t const *)new + map->offset,
737  sizeof(fr_event_fd_cb_t));
738  } else {
739  EVENT_DEBUG("\t%s curr unset", map->name);
740 
741  /*
742  * Mark this filter function as inactive
743  * by setting it to the 'noop' callback.
744  */
745  *((fr_event_fd_cb_t *)((uint8_t *)active + map->offset)) = fr_event_fd_noop;
746  }
747 
748  if (!(map + 1)->coalesce) break;
749  map++;
750  } while (1);
751 
752  if (out > end) {
753  fr_strerror_const("Out of memory to store kevent filters");
754  return -1;
755  }
756 
757  /*
758  * Upsert if we add a function or change the flags.
759  */
760  if (has_current_func &&
761  (!has_prev_func || (current_fflags != prev_fflags))) {
762  if ((size_t)(add_p - add) >= (NUM_ELEMENTS(add))) {
763  fr_strerror_const("Out of memory to store kevent EV_ADD filters");
764  return -1;
765  }
766  EVENT_DEBUG("\tEV_SET EV_ADD filter %s (%i), flags %i, fflags %i",
768  map->filter, map->flags, current_fflags);
769  EV_SET(add_p++, ef->fd, map->filter, map->flags, current_fflags, 0, ef);
770 
771  /*
772  * Delete if we remove a function.
773  */
774  } else if (!has_current_func && has_prev_func) {
775  EVENT_DEBUG("\tEV_SET EV_DELETE filter %s (%i), flags %i, fflags %i",
777  map->filter, EV_DELETE, 0);
778  EV_SET(out++, ef->fd, map->filter, EV_DELETE, 0, 0, ef);
779  }
780  }
781 
782  /*
783  * kevent is fine with adds/deletes in the same operation
784  * on the same file descriptor, but libkqueue doesn't do
785  * any kind of coalescing or ordering so you get an EEXIST
786  * error.
787  */
788  for (i = 0; i < (size_t)(add_p - add); i++) memcpy(out++, &add[i], sizeof(*out));
789 
790  return out - out_kev;
791 }
792 
793 /** Discover the type of a file descriptor
794  *
795  * This function writes the result of the discovery to the ef->type,
796  * and ef->sock_type fields.
797  *
798  * @param[out] ef to write type data to.
799  * @param[in] fd to discover the type of.
800  * @return
801  * - 0 on success.
802  * - -1 on failure.
803  */
804 static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
805 {
806  socklen_t opt_len = sizeof(ef->sock_type);
807 
808  /*
809  * It's a socket or PCAP socket
810  */
811  if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &ef->sock_type, &opt_len) == 0) {
812 #ifdef SO_GET_FILTER
813  opt_len = 0;
814  if (unlikely(getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, NULL, &opt_len) < 0)) {
815  fr_strerror_printf("Failed determining PF status: %s", fr_syserror(errno));
816  return -1;
817  }
818  if (opt_len) {
819  ef->type = FR_EVENT_FD_PCAP;
820  } else
821 #endif
822  {
823  ef->type = FR_EVENT_FD_SOCKET;
824  }
825 
826  /*
827  * It's a file or directory
828  */
829  } else {
830  struct stat buf;
831 
832  if (errno != ENOTSOCK) {
833  fr_strerror_printf("Failed retrieving socket type: %s", fr_syserror(errno));
834  return -1;
835  }
836 
837  if (fstat(fd, &buf) < 0) {
838  fr_strerror_printf("Failed calling stat() on file: %s", fr_syserror(errno));
839  return -1;
840  }
841 
842  if (S_ISDIR(buf.st_mode)) {
844  } else {
845  ef->type = FR_EVENT_FD_FILE;
846  }
847  }
848  ef->fd = fd;
849 
850  return 0;
851 }
852 
853 /** Remove a file descriptor from the event loop and rbtree but don't explicitly free it
854  *
855  *
856  * @param[in] ef to remove.
857  * @return
858  * - 0 on success.
859  * - -1 on error;
860  */
862 {
863  struct kevent evset[10];
864  int count = 0;
865  fr_event_list_t *el = ef->el;
866  fr_event_funcs_t funcs;
867 
868  /*
869  * Already been removed from the various trees and
870  * the event loop.
871  */
872  if (ef->is_registered) {
873  memset(&funcs, 0, sizeof(funcs));
874 
875  fr_assert(ef->armour == 0);
876 
877  /*
878  * If this fails, it's a pretty catastrophic error.
879  */
880  count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
881  &ef->active, ef, &funcs, &ef->active);
882  if (count > 0) {
883  int ret;
884 
885  /*
886  * If this fails, assert on debug builds.
887  */
888  ret = kevent(el->kq, evset, count, NULL, 0, NULL);
889  if (!fr_cond_assert_msg(ret >= 0,
890  "FD %i was closed without being removed from the KQ: %s",
891  ef->fd, fr_syserror(errno))) {
892  return -1; /* Prevent the free, and leave the fd in the trees */
893  }
894  }
895 
896  fr_rb_delete(el->fds, ef);
897  ef->is_registered = false;
898  }
899 
900  /*
901  * Insert into the deferred free list, event will be
902  * freed later.
903  */
904  if (el->in_handler) {
905  /*
906  * Don't allow the same event to be
907  * inserted into the free list multiple
908  * times.
909  *
910  * This can happen if the same ef is
911  * delivered by multiple filters, i.e.
912  * if EVFILT_READ and EVFILT_WRITE
913  * were both high, and both handlers
914  * attempted to delete the event
915  * we'd need to prevent the event being
916  * inserted into the free list multiple
917  * times.
918  */
920  return -1; /* Will be freed later */
921  } else if (fr_dlist_entry_in_list(&ef->entry)) {
923  }
924 
925  return 0;
926 }
927 
928 /** Move a file descriptor event from one event list to another
929  *
930  * FIXME - Move suspended events too.
931  *
932  * @note Any pending events will not be transferred.
933  *
934  * @param[in] dst Event list to move file descriptor event to.
935  * @param[in] src Event list to move file descriptor from.
936  * @param[in] fd of the event to move.
937  * @param[in] filter of the event to move.
938  * @return
939  * - 0 on success.
940  * - -1 on failure. The event will remain active in the src list.
941  */
943  fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
944 {
945  fr_event_fd_t *ef;
946  int ret;
947 
948  if (fr_event_loop_exiting(dst)) {
949  fr_strerror_const("Destination event loop exiting");
950  return -1;
951  }
952 
953  /*
954  * Ensure this exists
955  */
956  ef = fr_rb_find(src->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
957  if (unlikely(!ef)) {
958  fr_strerror_printf("No events are registered for fd %i", fd);
959  return -1;
960  }
961 
963  ef->linked_ctx, NULL,
964  dst, ef->fd, ef->filter, &ef->active, ef->error, ef->uctx);
965  if (ret < 0) return -1;
966 
967  (void)fr_event_fd_delete(src, ef->fd, ef->filter);
968 
969  return ret;
970 }
971 
972 
973 /** Suspend/resume a subset of filters
974  *
975  * This function trades producing useful errors for speed.
976  *
977  * An example of suspending the read filter for an FD would be:
978  @code {.c}
979  static fr_event_update_t pause_read[] = {
980  FR_EVENT_SUSPEND(fr_event_io_func_t, read),
981  { 0 }
982  }
983 
984  fr_event_filter_update(el, fd, FR_EVENT_FILTER_IO, pause_read);
985  @endcode
986  *
987  * @param[in] el to update descriptor in.
988  * @param[in] fd to update filters for.
989  * @param[in] filter The type of filter to update.
990  * @param[in] updates An array of updates to toggle filters on/off without removing
991  * the callback function.
992  */
994  fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
995 {
996  fr_event_fd_t *ef;
997  size_t i;
998  fr_event_funcs_t curr_active, curr_stored;
999  struct kevent evset[10];
1000  int count = 0;
1001 
1002  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1003  if (unlikely(!ef)) {
1004  fr_strerror_printf("No events are registered for fd %i", fd);
1005  return -1;
1006  }
1007 
1008 #ifndef NDEBUG
1009  ef->file = file;
1010  ef->line = line;
1011 #endif
1012 
1013  /*
1014  * Cheapest way of ensuring this function can error without
1015  * leaving everything in an inconsistent state.
1016  */
1017  memcpy(&curr_active, &ef->active, sizeof(curr_active));
1018  memcpy(&curr_stored, &ef->stored, sizeof(curr_stored));
1019 
1020  /*
1021  * Apply modifications to our copies of the active/stored array.
1022  */
1023  for (i = 0; updates[i].op; i++) {
1024  switch (updates[i].op) {
1025  default:
1026  case FR_EVENT_OP_SUSPEND:
1027  fr_assert(ef->armour == 0); /* can't suspect protected FDs */
1028  memcpy((uint8_t *)&ef->stored + updates[i].offset,
1029  (uint8_t *)&ef->active + updates[i].offset, sizeof(fr_event_fd_cb_t));
1030  memset((uint8_t *)&ef->active + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
1031  break;
1032 
1033  case FR_EVENT_OP_RESUME:
1034  memcpy((uint8_t *)&ef->active + updates[i].offset,
1035  (uint8_t *)&ef->stored + updates[i].offset, sizeof(fr_event_fd_cb_t));
1036  memset((uint8_t *)&ef->stored + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
1037  break;
1038  }
1039  }
1040 
1041  count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset), &ef->active,
1042  ef, &ef->active, &curr_active);
1043  if (unlikely(count < 0)) {
1044  error:
1045  memcpy(&ef->active, &curr_active, sizeof(curr_active));
1046  memcpy(&ef->stored, &curr_stored, sizeof(curr_stored));
1047  return -1;
1048  }
1049 
1050  if (count && unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0)) {
1051  fr_strerror_printf("Failed updating filters for FD %i: %s", ef->fd, fr_syserror(errno));
1052  goto error;
1053  }
1054 
1055  return 0;
1056 }
1057 
/** Insert a filter for the specified fd
 *
 * Allocates (or reuses) an event handle for (fd, filter), builds the kevent
 * changelist from the supplied function table, and registers it with the
 * event list's kqueue.
 *
 * @param[in] ctx	to bind lifetime of the event to.
 * @param[out] ef_out	Previously allocated ef, or NULL.
 * @param[in] el	to insert fd callback into.
 * @param[in] fd	to install filters for.
 * @param[in] filter	one of the #fr_event_filter_t values.
 * @param[in] funcs	Structure containing callback functions. If a function pointer
 *			is set, the equivalent kevent filter will be installed.
 * @param[in] error	function to call when an error occurs on the fd.
 * @param[in] uctx	to pass to handler.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): the line carrying the function name was lost in extraction.
 * Upstream this is _fr_event_filter_insert(NDEBUG_LOCATION_ARGS ...) — it also
 * supplies the `file` and `line` debug parameters used below; confirm against
 * the repository. */
		       TALLOC_CTX *ctx, fr_event_fd_t **ef_out,
		       fr_event_list_t *el, int fd,
		       fr_event_filter_t filter,
		       void *funcs, fr_event_error_cb_t error,
		       void *uctx)
{
	ssize_t			count;
	fr_event_fd_t		*ef;
	fr_event_funcs_t	active;
	struct kevent		evset[10];

	if (unlikely(!el)) {
		fr_strerror_const("Invalid argument: NULL event list");
		return -1;
	}

	if (unlikely(fd < 0)) {
		fr_strerror_printf("Invalid arguments: Bad FD %i", fd);
		return -1;
	}

	if (unlikely(el->exit)) {
		fr_strerror_const("Event loop exiting");
		return -1;
	}

	/*
	 *	If the caller didn't hand us an existing handle, look one
	 *	up by (fd, filter) in the event list's rbtree.
	 */
	if (!ef_out || !*ef_out) {
		ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
	} else {
		ef = *ef_out;
		fr_assert((fd < 0) || (ef->fd == fd));
	}

	/*
	 *	Need to free the event to change the talloc link.
	 *
	 *	This is generally bad.  If you hit this
	 *	code path you probably screwed up somewhere.
	 */
	if (unlikely(ef && (ef->linked_ctx != ctx))) TALLOC_FREE(ef);

	/*
	 *	No pre-existing event.  Allocate an entry
	 *	for insertion into the rbtree.
	 */
	if (!ef) {
		ef = talloc_zero(el, fr_event_fd_t);
		if (unlikely(!ef)) {
			fr_strerror_const("Out of memory");
			return -1;
		}
		talloc_set_destructor(ef, _event_fd_delete);

		/*
		 *	Bind the lifetime of the event to the specified
		 *	talloc ctx.  If the talloc ctx is freed, the
		 *	event will also be freed.
		 */
		if (ctx != el) talloc_link_ctx(ctx, ef);
		ef->linked_ctx = ctx;
		ef->el = el;

		/*
		 *	Determine what type of file descriptor
		 *	this is.
		 */
		if (fr_event_fd_type_set(ef, fd) < 0) {
		free:
			talloc_free(ef);
			return -1;
		}

		/*
		 *	Check the filter value is valid
		 */
		if ((filter > (NUM_ELEMENTS(filter_maps) - 1))) {
		not_supported:
			fr_strerror_printf("Filter %i not supported", filter);
			goto free;
		}
		ef->map = &filter_maps[filter];
		if (ef->map->idx_type == FR_EVENT_FUNC_IDX_NONE) goto not_supported;

		count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
					     &ef->active, ef, funcs, &ef->active);
		if (count < 0) goto free;
		if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
			fr_strerror_printf("Failed inserting filters for FD %i: %s", fd, fr_syserror(errno));
			goto free;
		}

		ef->filter = filter;
		fr_rb_insert(el->fds, ef);
		ef->is_registered = true;

	/*
	 *	Pre-existing event, update the filters and
	 *	functions associated with the file descriptor.
	 */
	} else {
		fr_assert(ef->is_registered == true);

		/*
		 *	Take a copy of the current set of active
		 *	functions, so we can error out in a
		 *	consistent state.
		 */
		memcpy(&active, &ef->active, sizeof(ef->active));

		fr_assert((ef->armour == 0) || ef->active.io.read);

		count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
					     &ef->active, ef, funcs, &ef->active);
		if (count < 0) {
		error:
			/* Roll the callback table back so failure leaves the handle unchanged */
			memcpy(&ef->active, &active, sizeof(ef->active));
			return -1;
		}
		if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
			fr_strerror_printf("Failed modifying filters for FD %i: %s", fd, fr_syserror(errno));
			goto error;
		}

		/*
		 *	Clear any previously suspended functions
		 */
		memset(&ef->stored, 0, sizeof(ef->stored));
	}

#ifndef NDEBUG
	ef->file = file;
	ef->line = line;
#endif
	ef->error = error;
	ef->uctx = uctx;

	if (ef_out) *ef_out = ef;

	return 0;
}
1211 
/** Associate I/O callbacks with a file descriptor
 *
 * Convenience wrapper around the generic filter-insert function for the
 * common read/write I/O case.
 *
 * @param[in] ctx	to bind lifetime of the event to.
 * @param[out] ef_out	Where to store the output event
 * @param[in] el	to insert fd callback into.
 * @param[in] fd	to install filters for.
 * @param[in] read_fn	function to call when fd is readable.
 * @param[in] write_fn	function to call when fd is writable.
 * @param[in] error	function to call when an error occurs on the fd.
 * @param[in] uctx	to pass to handler.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): the function-name line was lost in extraction — upstream this
 * is _fr_event_fd_insert(NDEBUG_LOCATION_ARGS ...); confirm. */
		       TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd,
		       fr_event_fd_cb_t read_fn,
		       fr_event_fd_cb_t write_fn,
		       fr_event_error_cb_t error,
		       void *uctx)
{
	fr_event_io_func_t funcs =  { .read = read_fn, .write = write_fn };

	/* At least one of read/write must be set or there's nothing to register */
	if (unlikely(!read_fn && !write_fn)) {
		fr_strerror_const("Invalid arguments: All callbacks are NULL");
		return -1;
	}

	/* NOTE(review): the call line was lost in extraction — upstream this
	 * returns _fr_event_filter_insert(NDEBUG_LOCATION_VALS ...); confirm. */
		ctx, ef_out, el, fd, FR_EVENT_FILTER_IO, &funcs, error, uctx);
}
1243 
/** Remove a file descriptor from the event loop
 *
 * Looks the handle up by (fd, filter) and frees it; the talloc destructor
 * performs the actual kqueue deregistration.
 *
 * @param[in] el	to remove file descriptor from.
 * @param[in] fd	to remove.
 * @param[in] filter	The type of filter to remove.
 * @return
 *	- 0 if file descriptor was removed.
 *	- <0 on error.
 */
/* NOTE(review): the signature line was lost in extraction — upstream this is
 * int fr_event_fd_delete(fr_event_list_t *el, int fd, fr_event_filter_t filter);
 * confirm. */
{
	fr_event_fd_t	*ef;

	ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
	if (unlikely(!ef)) {
		fr_strerror_printf("No events are registered for fd %i, filter %d", fd, filter);
		return -1;
	}

	/*
	 *	Free will normally fail if it's
	 *	a deferred free.  There is a special
	 *	case for kevent failures though.
	 *
	 *	We distinguish between the two by
	 *	looking to see if the ef is still
	 *	in the event tree.
	 *
	 *	Talloc returning -1 guarantees the
	 *	memory has not been freed.
	 */
	if ((talloc_free(ef) == -1) && ef->is_registered) return -1;

	return 0;
}
1279 
/** Get the opaque event handle from a file descriptor
 *
 * @param[in] el	to search for fd/filter in.
 * @param[in] fd	to search for.
 * @param[in] filter	to search for.
 * @return
 *	- NULL if no event could be found.
 *	- The opaque handle representing an fd event.
 */
/* NOTE(review): signature line lost in extraction — upstream this is
 * fr_event_fd_t *fr_event_fd_handle(fr_event_list_t *el, int fd, fr_event_filter_t filter);
 * confirm. */
{
	fr_event_fd_t	*ef;

	ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
	if (unlikely(!ef)) {
		fr_strerror_printf("No events are registered for fd %i", fd);
		return NULL;
	}

	return ef;
}
1301 
1302 /** Returns the appropriate callback function for a given event
1303  *
1304  * @param[in] ef the event filter fd handle.
1305  * @param[in] kq_filter If the callbacks are indexed by filter.
1306  * @param[in] kq_fflags If the callbacks are indexed by NOTES (fflags).
1307  * @return
1308  * - NULL if no event it associated with the given ef/kq_filter or kq_fflags combo.
1309  * - The callback that would be called if an event with this filter/fflag combo was received.
1310  */
1311 fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
1312 {
1313  return event_fd_func(ef, &kq_filter, &kq_fflags);
1314 }
1315 
/** Returns the uctx associated with an fr_event_fd_t handle
 *
 * Simple accessor for the opaque user context stored when the fd
 * event was inserted.
 */
/* NOTE(review): signature line lost in extraction — upstream this is
 * void *fr_event_fd_uctx(fr_event_fd_t *ef); confirm. */
{
	return ef->uctx;
}
1323 
1324 #ifndef NDEBUG
1325 /** Armour an FD
1326  *
1327  * @param[in] el to remove file descriptor from.
1328  * @param[in] fd to remove.
1329  * @param[in] filter The type of filter to remove.
1330  * @param[in] armour The armour to add.
1331  * @return
1332  * - 0 if file descriptor was armoured
1333  * - <0 on error.
1334  */
1335 int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1336 {
1337  fr_event_fd_t *ef;
1338 
1339  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1340  if (unlikely(!ef)) {
1341  fr_strerror_printf("No events are registered for fd %i", fd);
1342  return -1;
1343  }
1344 
1345  if (ef->armour != 0) {
1346  fr_strerror_printf("FD %i is already armoured", fd);
1347  return -1;
1348  }
1349 
1350  ef->armour = armour;
1351 
1352  return 0;
1353 }
1354 
1355 /** Unarmour an FD
1356  *
1357  * @param[in] el to remove file descriptor from.
1358  * @param[in] fd to remove.
1359  * @param[in] filter The type of filter to remove.
1360  * @param[in] armour The armour to remove
1361  * @return
1362  * - 0 if file descriptor was unarmoured
1363  * - <0 on error.
1364  */
1365 int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1366 {
1367  fr_event_fd_t *ef;
1368 
1369  ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1370  if (unlikely(!ef)) {
1371  fr_strerror_printf("No events are registered for fd %i", fd);
1372  return -1;
1373  }
1374 
1375  fr_assert(ef->armour == armour);
1376 
1377  ef->armour = 0;
1378  return 0;
1379 }
1380 #endif
1381 
/** Remove an event from the event loop
 *
 * Talloc destructor for timer events.  Removes the event from either the
 * deferred-insertion list or the timer lst, then clears the parent pointer
 * so the owner knows the handle is gone.
 *
 * @param[in] ev	to free.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): signature line lost in extraction — upstream this is the
 * talloc destructor static int _event_timer_free(fr_event_timer_t *ev);
 * confirm. */
{
	fr_event_list_t		*el = ev->el;
	fr_event_timer_t const	**ev_p;

	/*
	 *	Event may still be sitting on the deferred-insertion list
	 *	rather than in the lst proper.
	 */
	if (fr_dlist_entry_in_list(&ev->entry)) {
		(void) fr_dlist_remove(&el->ev_to_add, ev);
	} else {
		int		ret = fr_lst_extract(el->times, ev);
		char const	*err_file;
		int		err_line;

#ifndef NDEBUG
		err_file = ev->file;
		err_line = ev->line;
#else
		err_file = "not-available";
		err_line = 0;
#endif


		/*
		 *	Events MUST be in the lst (or the insertion list).
		 */
		if (!fr_cond_assert_msg(ret == 0,
					"Event %p, lst_id %i, allocd %s[%u], was not found in the event lst or "
					"insertion list when freed: %s", ev, ev->lst_id, err_file, err_line,
					fr_strerror())) return -1;
	}

	/*
	 *	NULL out the parent's pointer so it can't dangle.
	 */
	ev_p = ev->parent;
	fr_assert(*(ev->parent) == ev);
	*ev_p = NULL;

	return 0;
}
1425 
/** Insert a timer event into an event list
 *
 * @note The talloc parent of the memory returned in ev_p must not be changed.
 *	 If the lifetime of the event needs to be bound to another context
 *	 this function should be called with the existing event pointed to by
 *	 ev_p.
 *
 * @param[in] ctx		to bind lifetime of the event to.
 * @param[in] el		to insert event into.
 * @param[in,out] ev_p		If not NULL modify this event instead of creating a new one.  This is a parent
 *				in a temporal sense, not in a memory structure or dependency sense.
 * @param[in] when		we should run the event.
 * @param[in] callback		function to execute if the event fires.
 * @param[in] uctx		user data to pass to the event.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): the function-name line was lost in extraction — upstream this
 * is _fr_event_timer_at(NDEBUG_LOCATION_ARGS ...) which also supplies the
 * `file` and `line` debug parameters used below; confirm. */
		      TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
		      fr_time_t when, fr_event_timer_cb_t callback, void const *uctx)
{
	fr_event_timer_t *ev;

	if (unlikely(!el)) {
		fr_strerror_const("Invalid arguments: NULL event list");
		return -1;
	}

	if (unlikely(!callback)) {
		fr_strerror_const("Invalid arguments: NULL callback");
		return -1;
	}

	if (unlikely(!ev_p)) {
		fr_strerror_const("Invalid arguments: NULL ev_p");
		return -1;
	}

	if (unlikely(el->exit)) {
		fr_strerror_const("Event loop exiting");
		return -1;
	}

	/*
	 *	If there is an event, reuse it instead of freeing it
	 *	and allocating a new one.  This is to reduce memory
	 *	churn for repeat events.
	 */
	if (!*ev_p) {
	new_event:
		ev = talloc_zero(el, fr_event_timer_t);
		if (unlikely(!ev)) return -1;

		EVENT_DEBUG("%p - %s[%i] Added new timer %p", el, file, line, ev);

		/*
		 *	Bind the lifetime of the event to the specified
		 *	talloc ctx.  If the talloc ctx is freed, the
		 *	event will also be freed.
		 */
		if (ctx != el) talloc_link_ctx(ctx, ev);

		talloc_set_destructor(ev, _event_timer_free);
		ev->lst_id = 0;

	} else {
		ev = UNCONST(fr_event_timer_t *, *ev_p);

		EVENT_DEBUG("%p - %s[%i] Re-armed timer %p", el, file, line, ev);

		/*
		 *	We can't disarm the linking context due to
		 *	limitations in talloc, so if the linking
		 *	context changes, we need to free the old
		 *	event, and allocate a new one.
		 *
		 *	Freeing the event also removes it from the lst.
		 */
		if (unlikely(ev->linked_ctx != ctx)) {
			talloc_free(ev);
			goto new_event;
		}

		/*
		 *	Event may have fired, in which case the event
		 *	will no longer be in the event loop, so check
		 *	if it's in the lst before extracting it.
		 */
		if (!fr_dlist_entry_in_list(&ev->entry)) {
			int		ret;
			char const	*err_file;
			int		err_line;

			ret = fr_lst_extract(el->times, ev);

#ifndef NDEBUG
			err_file = ev->file;
			err_line = ev->line;
#else
			err_file = "not-available";
			err_line = 0;
#endif

			/*
			 *	Events MUST be in the lst (or the insertion list).
			 */
			if (!fr_cond_assert_msg(ret == 0,
						"Event %p, lst_id %i, allocd %s[%u], was not found in the event "
						"lst or insertion list when freed: %s", ev, ev->lst_id,
						err_file, err_line, fr_strerror())) return -1;
		}
	}

	ev->el = el;
	ev->when = when;
	ev->callback = callback;
	ev->uctx = uctx;
	ev->linked_ctx = ctx;
	ev->parent = ev_p;
#ifndef NDEBUG
	ev->file = file;
	ev->line = line;
#endif

	if (el->in_handler) {
		/*
		 *	Don't allow an event to be inserted
		 *	into the deferred insertion list
		 *	multiple times.
		 */
		/* NOTE(review): a line was lost in extraction here — upstream
		 * it conditionally appends ev to el->ev_to_add (the deferred
		 * insertion list); confirm against the repository. */
	} else if (unlikely(fr_lst_insert(el->times, ev) < 0)) {
		fr_strerror_const_push("Failed inserting event");
		talloc_set_destructor(ev, NULL);
		*ev_p = NULL;
		talloc_free(ev);
		return -1;
	}

	*ev_p = ev;

	return 0;
}
1570 
/** Insert a timer event into an event list
 *
 * Relative-time variant: converts the delta to an absolute time using the
 * event list's clock and delegates to the absolute-time insert function.
 *
 * @note The talloc parent of the memory returned in ev_p must not be changed.
 *	 If the lifetime of the event needs to be bound to another context
 *	 this function should be called with the existing event pointed to by
 *	 ev_p.
 *
 * @param[in] ctx		to bind lifetime of the event to.
 * @param[in] el		to insert event into.
 * @param[in,out] ev_p		If not NULL modify this event instead of creating a new one.  This is a parent
 *				in a temporal sense, not in a memory structure or dependency sense.
 * @param[in] delta		In how many nanoseconds to wait before should we execute the event.
 * @param[in] callback		function to execute if the event fires.
 * @param[in] uctx		user data to pass to the event.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): the function-name line was lost in extraction — upstream this
 * is _fr_event_timer_in(NDEBUG_LOCATION_ARGS ...); confirm. */
		      TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p,
		      fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx)
{
	/* NOTE(review): the call line was lost in extraction — upstream this
	 * returns _fr_event_timer_at(NDEBUG_LOCATION_VALS ...); confirm. */
		ctx, el, ev_p, fr_time_add(el->time(), delta), callback, uctx);
}
1596 
/** Delete a timer event from the event list
 *
 * Freeing the event triggers its talloc destructor, which removes it from
 * the timer lst (or the deferred-insertion list).
 *
 * @param[in] ev_p	of the event being deleted.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): signature line lost in extraction — upstream this is
 * int fr_event_timer_delete(fr_event_timer_t const **ev_p); confirm. */
{
	fr_event_timer_t *ev;
	int ret;

	/* Already deleted (or never armed) - nothing to do */
	if (unlikely(!*ev_p)) return 0;

	ev = UNCONST(fr_event_timer_t *, *ev_p);
	ret = talloc_free(ev);

	/*
	 *	Don't leave a garbage pointer value
	 *	in the parent.
	 */
	if (likely(ret == 0)) *ev_p = NULL;
	return 0;
}
1621 
/** Internal timestamp representing when the timer should fire
 *
 * @return When the timestamp should fire.
 */
/* NOTE(review): signature line lost in extraction — upstream this is
 * fr_time_t fr_event_timer_when(fr_event_timer_t const *ev); confirm. */
{
	return ev->when;
}
1630 
/** Remove PID wait event from kevent if the fr_event_pid_t is freed
 *
 * Talloc destructor: NULLs out the owner's handle pointer and, if the
 * EVFILT_PROC watch is still registered, removes it from the kqueue.
 *
 * @param[in] ev	to free.
 * @return 0
 */
/* NOTE(review): signature line lost in extraction — upstream this is the
 * talloc destructor static int _event_pid_free(fr_event_pid_t *ev); confirm. */
{
	struct kevent evset;

	if (ev->parent) *ev->parent = NULL;
	if (!ev->is_registered || (ev->pid < 0)) return 0; /* already deleted from kevent */

	EVENT_DEBUG("%p - Disabling event for PID %u - %p was freed", ev->el, (unsigned int)ev->pid, ev);

	EV_SET(&evset, ev->pid, EVFILT_PROC, EV_DELETE, NOTE_EXIT, 0, ev);

	/* Best effort - failure here just leaves a stale kqueue entry */
	(void) kevent(ev->el->kq, &evset, 1, NULL, 0, NULL);

	return 0;
}
1651 
1652 /** Evaluate a EVFILT_PROC event
1653  *
1654  */
1655 static inline CC_HINT(always_inline)
1656 void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
1657 {
1658  pid_t pid;
1659  fr_event_pid_t *ev;
1660  fr_event_pid_cb_t callback;
1661  void *uctx;
1662 
1663  EVENT_DEBUG("%p - PID %u exited with status %i",
1664  el, (unsigned int)kev->ident, (unsigned int)kev->data);
1665 
1666  ev = talloc_get_type_abort((void *)kev->udata, fr_event_pid_t);
1667 
1668  fr_assert(ev->pid == (pid_t) kev->ident);
1669  fr_assert((kev->fflags & NOTE_EXIT) != 0);
1670 
1671  pid = ev->pid;
1672  callback = ev->callback;
1673  uctx = ev->uctx;
1674 
1675  ev->is_registered = false; /* so we won't hit kevent again when it's freed */
1676 
1677  /*
1678  * Delete the event before calling it.
1679  *
1680  * This also sets the parent pointer
1681  * to NULL, so the thing that started
1682  * monitoring the process knows the
1683  * handle is no longer valid.
1684  *
1685  * EVFILT_PROC NOTE_EXIT events are always
1686  * oneshot no matter what flags we pass,
1687  * so we're just reflecting the state of
1688  * the kqueue.
1689  */
1690  talloc_free(ev);
1691 
1692  if (callback) callback(el, pid, (int) kev->data, uctx);
1693 }
1694 
/** Called on the next loop through the event loop when inserting an EVFILT_PROC event fails
 *
 * This is just a trampoline function which takes the user event and simulates
 * an EVFILT_PROC event from it.
 *
 * @param[in] el	That received the event.
 * @param[in] uctx	An fr_event_pid_t to process.
 */
/* NOTE(review): signature line lost in extraction — upstream this is
 * static void _fr_event_pid_early_exit(fr_event_list_t *el, void *uctx);
 * confirm. */
{
	fr_event_pid_t *ev = talloc_get_type_abort(uctx, fr_event_pid_t);

	EVENT_DEBUG("%p - PID %ld exited early, triggered through user event", el, (long)ev->pid);

	/*
	 *	Simulate a real struct kevent with the values we
	 *	recorded in fr_event_pid_wait.
	 */
	event_pid_eval(el, &(struct kevent){ .ident = ev->pid, .data = ev->early_exit.status, .fflags = NOTE_EXIT, .udata = ev });
}
1715 
/** Insert a PID event into an event list
 *
 * Registers an EVFILT_PROC NOTE_EXIT watch for the child, and handles the
 * race where the child exits before the kevent can be added by falling back
 * to waitid() + a user event that fires on the next loop.
 *
 * @note The talloc parent of the memory returned in ev_p must not be changed.
 *	 If the lifetime of the event needs to be bound to another context
 *	 this function should be called with the existing event pointed to by
 *	 ev_p.
 *
 * @param[in] ctx		to bind lifetime of the event to.
 * @param[in] el		to insert event into.
 * @param[in,out] ev_p		If not NULL modify this event instead of creating a new one.  This is a parent
 *				in a temporal sense, not in a memory structure or dependency sense.
 * @param[in] pid		child PID to wait for
 * @param[in] callback		function to execute if the event fires.
 * @param[in] uctx		user data to pass to the event.
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): the function-name line was lost in extraction — upstream this
 * is _fr_event_pid_wait(NDEBUG_LOCATION_ARGS ...) which supplies the `file`
 * and `line` debug parameters used below; confirm. */
		       TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p,
		       pid_t pid, fr_event_pid_cb_t callback, void *uctx)
{
	fr_event_pid_t *ev;
	struct kevent evset;

	ev = talloc(ctx, fr_event_pid_t);
	if (unlikely(ev == NULL)) {
		fr_strerror_const("Out of memory");
		return -1;
	}
	*ev = (fr_event_pid_t) {
		.el = el,
		.pid = pid,
		.callback = callback,
		.uctx = uctx,
		.parent = ev_p,
#ifndef NDEBUG
		.file = file,
		.line = line,
#endif
	};
	talloc_set_destructor(ev, _event_pid_free);

	/*
	 *	macOS only, on FreeBSD NOTE_EXIT always provides
	 *	the status anyway.
	 */
#ifndef NOTE_EXITSTATUS
#define NOTE_EXITSTATUS (0)
#endif

	EVENT_DEBUG("%p - Adding exit waiter for PID %u", el, (unsigned int)pid);

	EV_SET(&evset, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT | NOTE_EXITSTATUS, 0, ev);
	ev->is_registered = true;

	/*
	 *	This deals with the race where the process exited
	 *	before we could add it to the kqueue.
	 *
	 *	Unless our caller is broken, the process should
	 *	still be available for reaping, so we check
	 *	waitid to see if there is a pending process and
	 *	then call the callback as kqueue would have done.
	 */
	if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
		siginfo_t	info;
		int		ret;

		/*
		 *	Ensure we don't accidentally pick up the error
		 *	from kevent.
		 */
		/* NOTE(review): a line was lost in extraction here — upstream
		 * it clears the thread-local error stack (fr_strerror_clear());
		 * confirm against the repository. */

		ev->is_registered = false;

		/*
		 *	If the child exited before kevent() was
		 *	called, we need to get its status via
		 *	waitid().
		 *
		 *	We don't reap the process here to emulate
		 *	what kqueue does (notify but not reap).
		 *
		 *	waitid returns >0 on success, 0 if the
		 *	process is still running, and -1 on failure.
		 *
		 *	If we get a 0, then that's extremely strange
		 *	as adding the kevent failed for a reason
		 *	other than the process already having exited.
		 *
		 *	On Linux waitid will always return 1 to
		 *	indicate the process exited.
		 *
		 *	On macOS we seem to get a mix of 1 or 0,
		 *	even if the si_code is one of the values
		 *	we'd consider to indicate that the process
		 *	had completed.
		 */
		ret = waitid(P_PID, pid, &info, WEXITED | WNOHANG | WNOWAIT);
		if (ret > 0) {
			static fr_table_num_sorted_t const si_codes[] = {
				{ L("exited"), CLD_EXITED },
				{ L("killed"), CLD_KILLED },
				{ L("dumped"), CLD_DUMPED },
				{ L("trapped"), CLD_TRAPPED },
				{ L("stopped"), CLD_STOPPED },
				{ L("continued"), CLD_CONTINUED }
			};
			static size_t si_codes_len = NUM_ELEMENTS(si_codes);

			switch (info.si_code) {
			case CLD_EXITED:
			case CLD_KILLED:
			case CLD_DUMPED:
				EVENT_DEBUG("%p - PID %ld early exit - code %s (%i), status %i",
					    el, (long)pid, fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
					    info.si_code, info.si_status);

				/*
				 *	Record the status for later
				 */
				ev->early_exit.status = info.si_status;

				/*
				 *	The user event acts as a surrogate for
				 *	an EVFILT_PROC event, and will be evaluated
				 *	during the next loop through the event loop.
				 *
				 *	It will be automatically deleted when the
				 *	fr_event_pid_t is freed.
				 *
				 *	Previously we tried to evaluate the proc
				 *	callback here directly, but this lead to
				 *	multiple problems, the biggest being that
				 *	setting requests back to resumable failed
				 *	because they were not yet yielded,
				 *	leading to hangs.
				 */
			early_exit:
				if (fr_event_user_insert(ev, el, &ev->early_exit.ev, true, _fr_event_pid_early_exit, ev) < 0) {
					fr_strerror_printf_push("Failed adding wait for PID %ld, and failed adding "
								"backup user event", (long) pid);
				error:
					talloc_free(ev);
					return -1;
				}
				break;

			default:
				fr_strerror_printf("Unexpected code %s (%u) whilst waiting on PID %ld",
						   fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
						   info.si_code, (long) pid);

				goto error;
			}
		/*
		 *	Failed adding waiter for process, but process has not completed...
		 *
		 *	This weird, but seems to happen on macOS occasionally.
		 *
		 *	Add an event to run early exit...
		 *
		 *	Man pages for waitid say if it returns 0 the info struct can be in
		 *	a nondeterministic state, so there's nothing more to do.
		 */
		} else if (ret == 0) {
			goto early_exit;
		} else {
			/*
			 *	Print this error here, so that the caller gets
			 *	the error from kevent(), and not waitpid().
			 */
			fr_strerror_printf("Failed adding waiter for PID %ld - kevent %s, waitid %s",
					   (long) pid, fr_syserror(evset.flags), fr_syserror(errno));

			goto error;
		}
	}

	/*
	 *	Sometimes the caller doesn't care about getting the
	 *	PID.  But we still want to clean it up.
	 */
	if (ev_p) *ev_p = ev;

	return 0;
}
1905 
1906 /** Saves some boilerplate...
1907  *
1908  */
1909 static inline CC_HINT(always_inline)
1910 void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
1911 {
1912  if (reap->callback) reap->callback(reap->el, pid, status, reap->uctx);
1913 }
1914 
1915 /** Does the actual reaping of PIDs
1916  *
1917  */
1918 static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
1919 {
1920  fr_event_pid_reap_t *reap = talloc_get_type_abort(uctx, fr_event_pid_reap_t);
1921 
1922  waitpid(pid, &status, WNOHANG); /* Don't block the process if there's a logic error somewhere */
1923 
1924  EVENT_DEBUG("%s - Reaper reaped PID %u, status %u - %p", __FUNCTION__, pid, status, reap);
1925 
1926  event_list_reap_run_callback(reap, pid, status);
1927 
1928  talloc_free(reap);
1929 }
1930 
/* Talloc destructor for reap entries: removes the entry from the event
 * list's pid_to_reap list if it was inserted.
 *
 * NOTE(review): the signature line was lost in extraction — upstream this is
 * static int _fr_event_reap_free(fr_event_pid_reap_t *reap); confirm. */
{
	/*
	 *	Clear out the entry in the pid_to_reap
	 *	list if the event was inserted.
	 */
	if (fr_dlist_entry_in_list(&reap->entry)) {
		EVENT_DEBUG("%s - Removing entry from pid_to_reap %i - %p", __FUNCTION__,
			    reap->pid_ev ? reap->pid_ev->pid : -1, reap);
		fr_dlist_remove(&reap->el->pid_to_reap, reap);
	}

	return 0;
}
1945 
/** Asynchronously wait for a PID to exit, then reap it
 *
 * This is intended to be used when we no longer care about a process
 * exiting, but we still want to clean up its state so we don't have
 * zombie processes sticking around.
 *
 * @param[in] el		to use to reap the process.
 * @param[in] pid		to reap.
 * @param[in] callback		to call when the process is reaped.
 *				May be NULL.
 * @param[in] uctx		to pass to callback.
 * @return
 *	- -1 if we couldn't find the process or it has already exited/been reaped.
 *	- 0 on success (we setup a process handler).
 */
/* NOTE(review): the function-name line was lost in extraction — upstream this
 * is _fr_event_pid_reap(NDEBUG_LOCATION_ARGS fr_event_list_t *el, pid_t pid,
 * fr_event_pid_cb_t callback, void *uctx); confirm. */
{
	int ret;
	fr_event_pid_reap_t *reap;

	reap = talloc_zero(NULL, fr_event_pid_reap_t);
	if (unlikely(!reap)) {
		fr_strerror_const("Out of memory");
		return -1;
	}
	talloc_set_destructor(reap, _fr_event_reap_free);

	/* Register the exit waiter; _fr_event_pid_reap_cb does the reaping */
	ret = _fr_event_pid_wait(NDEBUG_LOCATION_VALS reap, el, &reap->pid_ev, pid, _fr_event_pid_reap_cb, reap);
	if (ret < 0) {
		talloc_free(reap);
		return ret;
	}

	reap->el = el;
	reap->callback = callback;
	reap->uctx = uctx;

	EVENT_DEBUG("%s - Adding reaper for PID %u - %p", __FUNCTION__, pid, reap);

	/* NOTE(review): a line was lost in extraction here — upstream it
	 * inserts `reap` into el->pid_to_reap (fr_dlist_insert_tail); confirm. */

	return ret;
}
1989 
/** Send a signal to all the processes we have in our reap list, and reap them
 *
 * With a timeout, a temporary kqueue is used to wait for processes to exit
 * gracefully; anything still alive afterwards is signalled and reaped
 * synchronously.
 *
 * @param[in] el		containing the processes to reap.
 * @param[in] timeout		how long to wait before we signal the processes.
 * @param[in] signal		to send to processes.  Should be a fatal signal.
 * @return The number of processes reaped.
 */
/* NOTE(review): the signature line was lost in extraction — upstream this is
 * unsigned int fr_event_list_reap_signal(fr_event_list_t *el,
 * fr_time_delta_t timeout, int signal); confirm. */
{
	unsigned int processed = fr_dlist_num_elements(&el->pid_to_reap);
	fr_event_pid_reap_t *reap = NULL;

	/*
	 *	If we've got a timeout, our best option
	 *	is to use a kqueue instance to monitor
	 *	for process exit.
	 */
	/* NOTE(review): a line was lost in extraction here — upstream this is
	 * the `if (fr_time_delta_ispos(timeout)) {` guard opening the block
	 * below; confirm. */
		int status;
		struct kevent evset;
		int waiting = 0;
		int kq = kqueue();
		fr_time_t now, start = el->time(), end = fr_time_add(start, timeout);

		if (unlikely(kq < 0)) goto force;

		/* NOTE(review): a line was lost in extraction here — upstream
		 * it's the safe list iteration macro declaring `i`, e.g.
		 * fr_dlist_foreach_safe(&el->pid_to_reap, fr_event_pid_reap_t, i) {
		 * confirm. */
		if (!i->pid_ev) {
			EVENT_DEBUG("%p - %s - Reaper already called (logic error)... - %p",
				    el, __FUNCTION__, i);

			event_list_reap_run_callback(i, -1, SIGKILL);
			talloc_free(i);
			continue;
		}

		/*
		 *	See if any processes have exited already
		 */
		if (waitpid(i->pid_ev->pid, &status, WNOHANG) == i->pid_ev->pid) {	/* reap */
			EVENT_DEBUG("%p - %s - Reaper PID %u already exited - %p",
				    el, __FUNCTION__, i->pid_ev->pid, i);
			event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
			talloc_free(i);
			continue;
		}

		/*
		 *	Add the rest to a temporary event loop
		 */
		EV_SET(&evset, i->pid_ev->pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, i);
		if (kevent(kq, &evset, 1, NULL, 0, NULL) < 0) {
			EVENT_DEBUG("%p - %s - Failed adding reaper PID %u to tmp event loop - %p",
				    el, __FUNCTION__, i->pid_ev->pid, i);
			event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
			talloc_free(i);
			continue;
		}
		waiting++;
		}}	/* closes both the foreach body and the foreach_safe macro */

		/*
		 *	Keep draining process exits as they come in...
		 */
		while ((waiting > 0) && fr_time_gt(end, (now = el->time()))) {
			struct kevent kev;
			int ret;

			ret = kevent(kq, NULL, 0, &kev, 1, &fr_time_delta_to_timespec(fr_time_sub(end, now)));
			switch (ret) {
			default:
				EVENT_DEBUG("%p - %s - Reaper tmp loop error %s, forcing process reaping",
					    el, __FUNCTION__, fr_syserror(errno));
				close(kq);
				goto force;

			case 0:
				EVENT_DEBUG("%p - %s - Reaper timeout waiting for process exit, forcing process reaping",
					    el, __FUNCTION__);
				close(kq);
				goto force;

			case 1:
				reap = talloc_get_type_abort(kev.udata, fr_event_pid_reap_t);

				EVENT_DEBUG("%p - %s - Reaper reaped PID %u, status %u - %p",
					    el, __FUNCTION__, (unsigned int)kev.ident, (unsigned int)kev.data, reap);
				waitpid(reap->pid_ev->pid, &status, WNOHANG);	/* reap */

				event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
				talloc_free(reap);
				break;
			}
			waiting--;
		}

		close(kq);
	}

force:
	/*
	 *	Deal with any lingering reap requests
	 */
	while ((reap = fr_dlist_head(&el->pid_to_reap))) {
		int status;

		EVENT_DEBUG("%s - Reaper forcefully reaping PID %u - %p", __FUNCTION__, reap->pid_ev->pid, reap);

		if (kill(reap->pid_ev->pid, signal) < 0) {
			/*
			 *	Make sure we don't hang if the
			 *	process has actually exited.
			 *
			 *	We could check for ESRCH but it's
			 *	not clear if that'd be returned
			 *	for a PID in the unreaped state
			 *	or not...
			 */
			waitpid(reap->pid_ev->pid, &status, WNOHANG);
			event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
			talloc_free(reap);
			continue;
		}

		/*
		 *	Wait until the child process exits
		 */
		waitpid(reap->pid_ev->pid, &status, 0);
		event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
		/* NOTE(review): a line was lost in extraction here — upstream
		 * it frees the reap entry (talloc_free(reap)), which also
		 * removes it from the list; confirm. */
	}

	return processed;
}
2124 
/** Memory will not be freed if we fail to remove the event from the kqueue
 *
 * It's easier to debug memory leaks with modern tooling than it is
 * to determine why we get random failures and event leaks inside of kqueue.
 *
 * Talloc destructor for user events: removes the EVFILT_USER registration.
 *
 * @return
 *	- 0 on success.
 *	- -1 on failure.
 */
/* NOTE(review): signature line lost in extraction — upstream this is the
 * talloc destructor static int _event_user_delete(fr_event_user_t *ev);
 * confirm. */
{
	if (ev->is_registered) {
		struct kevent evset;

		/* User events are keyed on the handle's address (uintptr_t)ev */
		EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_DELETE, 0, 0, 0);

		if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
			fr_strerror_printf("Failed removing user event - kevent %s", fr_syserror(evset.flags));
			return -1;
		}
		ev->is_registered = false;
	}

	return 0;
}
2150 
static inline CC_HINT(always_inline)
void event_user_eval(fr_event_list_t *el, struct kevent *kev)
{
	fr_event_user_t *user_ev;

	/*
	 *	ident 0 is the internal "wakeup" event;
	 *	there's nothing to dispatch for it.
	 */
	if (kev->ident == 0) return;

	user_ev = talloc_get_type_abort((void *)kev->ident, fr_event_user_t);
	fr_assert((uintptr_t)user_ev == kev->ident);

	user_ev->callback(el, user_ev->uctx);
}
2167 
/** Add a user callback to the event list.
 *
 * @param[in] ctx	to allocate the event in.
 * @param[in] el	Containing the timer events.
 * @param[out] ev_p	Where to write a pointer.
 * @param[in] trigger	Whether the user event is triggered initially.
 * @param[in] callback	for EVFILT_USER.
 * @param[in] uctx	for the callback.
 * @return
 *	- 0 on success.
 *	- -1 on error.
 */
int _fr_event_user_insert(NDEBUG_LOCATION_ARGS
			  TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p,
			  bool trigger, fr_event_user_cb_t callback, void *uctx)
{
	fr_event_user_t *ev;
	struct kevent evset;

	ev = talloc(ctx, fr_event_user_t);
	if (unlikely(ev == NULL)) {
		fr_strerror_const("Out of memory");
		return -1;
	}
	*ev = (fr_event_user_t) {
		.el = el,
		.callback = callback,
		.uctx = uctx,
#ifndef NDEBUG
		.file = file,
		.line = line,
#endif
	};

	/*
	 *	(trigger * NOTE_TRIGGER) arms the event immediately
	 *	when trigger is true, else adds it un-triggered.
	 */
	EV_SET(&evset, (uintptr_t)ev,
	       EVFILT_USER, EV_ADD | EV_DISPATCH, (trigger * NOTE_TRIGGER), 0, ev);

	if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
		/*
		 *	errno, not the input evset.flags, carries the
		 *	kevent() failure reason.
		 */
		fr_strerror_printf("Failed adding user event - kevent %s", fr_syserror(errno));
		talloc_free(ev);
		return -1;
	}
	ev->is_registered = true;
	talloc_set_destructor(ev, _event_user_delete);

	if (ev_p) *ev_p = ev;

	return 0;
}
2217 
/** Trigger a user event
 *
 * @param[in] el	containing the user event.
 * @param[in] ev	Handle for the user event.
 * @return
 *	- 0 on success.
 *	- -1 on error.
 */
int fr_event_user_trigger(fr_event_list_t *el, fr_event_user_t *ev)
{
	struct kevent evset;

	EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_ENABLE, NOTE_TRIGGER, 0, NULL);

	if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
		/*
		 *	errno holds the kevent() failure reason;
		 *	evset.flags is input data.
		 */
		fr_strerror_printf("Failed triggering user event - kevent %s", fr_syserror(errno));
		return -1;
	}

	return 0;
}
2239 
2240 /** Add a pre-event callback to the event list.
2241  *
2242  * Events are serviced in insert order. i.e. insert A, B, we then
2243  * have A running before B.
2244  *
2245  * @param[in] el Containing the timer events.
2246  * @param[in] callback The pre-processing callback.
2247  * @param[in] uctx for the callback.
2248  * @return
2249  * - < 0 on error
2250  * - 0 on success
2251  */
2253 {
2254  fr_event_pre_t *pre;
2255 
2256  pre = talloc(el, fr_event_pre_t);
2257  pre->callback = callback;
2258  pre->uctx = uctx;
2259 
2261 
2262  return 0;
2263 }
2264 
2265 /** Delete a pre-event callback from the event list.
2266  *
2267  * @param[in] el Containing the timer events.
2268  * @param[in] callback The pre-processing callback.
2269  * @param[in] uctx for the callback.
2270  * @return
2271  * - < 0 on error
2272  * - 0 on success
2273  */
2275 {
2276  fr_event_pre_t *pre, *next;
2277 
2278  for (pre = fr_dlist_head(&el->pre_callbacks);
2279  pre != NULL;
2280  pre = next) {
2281  next = fr_dlist_next(&el->pre_callbacks, pre);
2282 
2283  if ((pre->callback == callback) &&
2284  (pre->uctx == uctx)) {
2286  talloc_free(pre);
2287  return 0;
2288  }
2289  }
2290 
2291  return -1;
2292 }
2293 
2294 /** Add a post-event callback to the event list.
2295  *
2296  * Events are serviced in insert order. i.e. insert A, B, we then
2297  * have A running before B.
2298  *
2299  * @param[in] el Containing the timer events.
2300  * @param[in] callback The post-processing callback.
2301  * @param[in] uctx for the callback.
2302  * @return
2303  * - < 0 on error
2304  * - 0 on success
2305  */
2307 {
2308  fr_event_post_t *post;
2309 
2310  post = talloc(el, fr_event_post_t);
2311  post->callback = callback;
2312  post->uctx = uctx;
2313 
2315 
2316  return 0;
2317 }
2318 
2319 /** Delete a post-event callback from the event list.
2320  *
2321  * @param[in] el Containing the timer events.
2322  * @param[in] callback The post-processing callback.
2323  * @param[in] uctx for the callback.
2324  * @return
2325  * - < 0 on error
2326  * - 0 on success
2327  */
2329 {
2330  fr_event_post_t *post, *next;
2331 
2332  for (post = fr_dlist_head(&el->post_callbacks);
2333  post != NULL;
2334  post = next) {
2335  next = fr_dlist_next(&el->post_callbacks, post);
2336 
2337  if ((post->callback == callback) &&
2338  (post->uctx == uctx)) {
2340  talloc_free(post);
2341  return 0;
2342  }
2343  }
2344 
2345  return -1;
2346 }
2347 
2348 /** Run a single scheduled timer event
2349  *
2350  * @param[in] el containing the timer events.
2351  * @param[in] when Process events scheduled to run before or at this time.
2352  * @return
2353  * - 0 no timer events fired.
2354  * - 1 a timer event fired.
2355  */
2357 {
2358  fr_event_timer_cb_t callback;
2359  void *uctx;
2360  fr_event_timer_t *ev;
2361 
2362  if (unlikely(!el)) return 0;
2363 
2364  if (fr_lst_num_elements(el->times) == 0) {
2365  *when = fr_time_wrap(0);
2366  return 0;
2367  }
2368 
2369  ev = fr_lst_peek(el->times);
2370  if (!ev) {
2371  *when = fr_time_wrap(0);
2372  return 0;
2373  }
2374 
2375  /*
2376  * See if it's time to do this one.
2377  */
2378  if (fr_time_gt(ev->when, *when)) {
2379  *when = ev->when;
2380  return 0;
2381  }
2382 
2383  callback = ev->callback;
2384  memcpy(&uctx, &ev->uctx, sizeof(uctx));
2385 
2386  fr_assert(*ev->parent == ev);
2387 
2388  /*
2389  * Delete the event before calling it.
2390  */
2392 
2393  callback(el, *when, uctx);
2394 
2395  return 1;
2396 }
2397 
/** Gather outstanding timer and file descriptor events
 *
 * @param[in] el	to process events for.
 * @param[in] now	The current time.
 * @param[in] wait	if true, block on the kevent() call until a timer or file descriptor event occurs.
 * @return
 *	- <0 error, or the event loop is exiting
 *	- the number of outstanding I/O events, +1 if at least one timer will fire.
 */
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
{
	fr_time_delta_t		when, *wake;
	struct timespec		ts_when, *ts_wake;
	fr_event_pre_t		*pre;
	int			num_fd_events;
	bool			timer_event_ready = false;
	fr_event_timer_t	*ev;

	el->num_fd_events = 0;

	if (el->will_exit || el->exit) {
		el->exit = el->will_exit;

		fr_strerror_const("Event loop exiting");
		return -1;
	}

	/*
	 *	By default we wait for 0ns, which means returning
	 *	immediately from kevent().
	 */
	when = fr_time_delta_wrap(0);
	wake = &when;
	el->now = now;

	/*
	 *	See when we have to wake up.  Either now, if the timer
	 *	events are in the past.  Or, we wait for a future
	 *	timer event.
	 */
	ev = fr_lst_peek(el->times);
	if (ev) {
		if (fr_time_lteq(ev->when, el->now)) {
			timer_event_ready = true;

		} else if (wait) {
			when = fr_time_sub(ev->when, el->now);

		} /* else we're not waiting, leave "when == 0" */

	} else if (wait) {
		/*
		 *	We're asked to wait, but there's no timer
		 *	event.  We can then sleep forever.
		 */
		wake = NULL;
	}

	/*
	 *	Run the status callbacks.  It may tell us that the
	 *	application has more work to do, in which case we
	 *	re-set the timeout to be instant.
	 *
	 *	We only run these callbacks if the caller is otherwise
	 *	idle.
	 */
	if (wait) {
		for (pre = fr_dlist_head(&el->pre_callbacks);
		     pre != NULL;
		     pre = fr_dlist_next(&el->pre_callbacks, pre)) {
			if (pre->callback(now, wake ? *wake : fr_time_delta_wrap(0), pre->uctx) > 0) {
				wake = &when;
				when = fr_time_delta_wrap(0);
			}
		}
	}

	/*
	 *	Wake is the delta between el->now
	 *	(the event loops view of the current time)
	 *	and when the event should occur.
	 */
	if (wake) {
		ts_when = fr_time_delta_to_timespec(when);
		ts_wake = &ts_when;
	} else {
		ts_wake = NULL;
	}

	/*
	 *	Populate el->events with the list of I/O events
	 *	that occurred since this function was last called
	 *	or wait for the next timer event.
	 */
	num_fd_events = kevent(el->kq, NULL, 0, el->events, FR_EV_BATCH_FDS, ts_wake);

	/*
	 *	Interrupt is different from timeout / FD events.
	 */
	if (unlikely(num_fd_events < 0)) {
		if (errno == EINTR) {
			return 0;
		} else {
			fr_strerror_printf("Failed calling kevent: %s", fr_syserror(errno));
			return -1;
		}
	}

	el->num_fd_events = num_fd_events;

	EVENT_DEBUG("%p - %s - kevent returned %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	If there are no FD events, we must have woken up from a timer
	 */
	if (!num_fd_events) {
		el->now = fr_time_add(el->now, when);
		if (wait) timer_event_ready = true;
	}
	/*
	 *	The caller doesn't really care what the value of the
	 *	return code is.  Just that it's greater than zero if
	 *	events needs servicing.
	 *
	 *	num_fd_events	  > 0 - if kevent() returns FD events
	 *	timer_event_ready > 0 - if there were timers ready BEFORE or AFTER calling kevent()
	 */
	return num_fd_events + timer_event_ready;
}
2527 
2528 static inline CC_HINT(always_inline)
2529 void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
2530 {
2531  fr_event_fd_cb_t fd_cb;
2532 
2533  while ((fd_cb = event_fd_func(ef, filter, fflags))) {
2534  fd_cb(el, ef->fd, flags, ef->uctx);
2535  }
2536 }
2537 
/** Service any outstanding timer or file descriptor events
 *
 * @param[in] el containing events to service.
 */
void fr_event_service(fr_event_list_t *el)
{
	int			i;
	fr_event_post_t		*post;
	fr_time_t		when;
	fr_event_timer_t	*ev;

	if (unlikely(el->exit)) return;

	EVENT_DEBUG("%p - %s - Servicing %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	Run all of the file descriptor events.
	 */
	el->in_handler = true;
	for (i = 0; i < el->num_fd_events; i++) {
		/*
		 *	Process any user events
		 */
		switch (el->events[i].filter) {
		case EVFILT_USER:
			event_user_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process proc events
		 */
		case EVFILT_PROC:
			event_pid_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process various types of file descriptor events
		 */
		default:
		{
			fr_event_fd_t	*ef = talloc_get_type_abort(el->events[i].udata, fr_event_fd_t);
			int		fd_errno = 0;

			int		fflags = el->events[i].fflags;	/* mutable */
			int		filter = el->events[i].filter;
			int		flags = el->events[i].flags;

			if (!ef->is_registered) continue;	/* Was deleted between corral and service */

			if (unlikely(flags & EV_ERROR)) {
				fd_errno = el->events[i].data;
			ev_error:
				/*
				 *	Call the error handler, but only if the socket hasn't been deleted at EOF
				 *	below.
				 */
				if (ef->is_registered && ef->error) ef->error(el, ef->fd, flags, fd_errno, ef->uctx);
				TALLOC_FREE(ef);
				continue;
			}

			/*
			 *	EOF can indicate we've actually reached
			 *	the end of a file, but for sockets it usually
			 *	indicates the other end of the connection
			 *	has gone away.
			 */
			if (flags & EV_EOF) {
				/*
				 *	This is fine, the callback will get notified
				 *	via the flags field.
				 */
				if (ef->type == FR_EVENT_FD_FILE) goto service;
#if defined(__linux__) && defined(SO_GET_FILTER)
				/*
				 *	There seems to be an issue with the
				 *	ioctl(...SIOCNQ...) call libkqueue
				 *	uses to determine the number of bytes
				 *	readable.  When ioctl returns, the number
				 *	of bytes available is set to zero, which
				 *	libkqueue interprets as EOF.
				 *
				 *	As a workaround, if we're not reading
				 *	a file, and are operating on a raw socket
				 *	with a packet filter attached, we ignore
				 *	the EOF flag and continue.
				 */
				if ((ef->sock_type == SOCK_RAW) && (ef->type == FR_EVENT_FD_PCAP)) goto service;
#endif

				/*
				 *	If we see an EV_EOF flag that means the
				 *	read side of the socket has been closed
				 *	but there may still be pending data.
				 *
				 *	Dispatch the read event and then error.
				 */
				if ((el->events[i].filter == EVFILT_READ) && (el->events[i].data > 0)) {
					event_callback(el, ef, &filter, flags, &fflags);
				}

				fd_errno = el->events[i].fflags;

				goto ev_error;
			}

		service:
#ifndef NDEBUG
			EVENT_DEBUG("Running event for fd %d, from %s[%d]", ef->fd, ef->file, ef->line);
#endif

			/*
			 *	Service the event_fd events
			 */
			event_callback(el, ef, &filter, flags, &fflags);
		}
		}
	}

	/*
	 *	Process any deferred frees performed
	 *	by the I/O handlers.
	 *
	 *	The events are removed from the FD rbtree
	 *	and kevent immediately, but frees are
	 *	deferred to allow stale events to be
	 *	skipped sans SEGV.
	 */
	el->in_handler = false;	/* Allow events to be deleted */
	{
		fr_event_fd_t *ef;

		while ((ef = fr_dlist_head(&el->fd_to_free))) talloc_free(ef);
	}

	/*
	 *	We must call el->time() again here, else the event
	 *	list's time gets updated too infrequently, and we
	 *	can end up with a situation where timers are
	 *	serviced much later than they should be, which can
	 *	cause strange interaction effects, spurious calls
	 *	to kevent, and busy loops.
	 */
	el->now = el->time();

	/*
	 *	Run all of the timer events.  Note that these can add
	 *	new timers!
	 */
	if (fr_lst_num_elements(el->times) > 0) {
		el->in_handler = true;

		do {
			when = el->now;
		} while (fr_event_timer_run(el, &when) == 1);

		el->in_handler = false;
	}

	/*
	 *	New timers can be added while running the timer
	 *	callback.  Instead of being added to the main timer
	 *	lst, they are instead added to the "to do" list.
	 *	Once we're finished running the callbacks, we walk
	 *	through the "to do" list, and add the callbacks to the
	 *	timer lst.
	 *
	 *	Doing it this way prevents the server from running
	 *	into an infinite loop.  The timer callback MAY add a
	 *	new timer which is in the past.  The loop above would
	 *	then immediately run the new callback, which could
	 *	also add an event in the past...
	 */
	while ((ev = fr_dlist_head(&el->ev_to_add)) != NULL) {
		(void)fr_dlist_remove(&el->ev_to_add, ev);
		if (unlikely(fr_lst_insert(el->times, ev) < 0)) {
			talloc_free(ev);
			fr_assert_msg(0, "failed inserting lst event: %s", fr_strerror());	/* Die in debug builds */
		}
	}
	el->now = el->time();

	/*
	 *	Run all of the post-processing events.
	 */
	for (post = fr_dlist_head(&el->post_callbacks);
	     post != NULL;
	     post = fr_dlist_next(&el->post_callbacks, post)) {
		post->callback(el, el->now, post->uctx);
	}
}
2729 
2730 /** Signal an event loop exit with the specified code
2731  *
2732  * The event loop will complete its current iteration, and then exit with the specified code.
2733  *
2734  * @param[in] el to signal to exit.
2735  * @param[in] code for #fr_event_loop to return.
2736  */
2738 {
2739  if (unlikely(!el)) return;
2740 
2741  el->will_exit = code;
2742 }
2743 
2744 /** Check to see whether the event loop is in the process of exiting
2745  *
2746  * @param[in] el to check.
2747  */
2749 {
2750  return ((el->will_exit != 0) || (el->exit != 0));
2751 }
2752 
2753 /** Run an event loop
2754  *
2755  * @note Will not return until #fr_event_loop_exit is called.
2756  *
2757  * @param[in] el to start processing.
2758  */
2759 CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el)
2760 {
2761  el->will_exit = el->exit = 0;
2762 
2763  el->dispatch = true;
2764  while (!el->exit) {
2765  if (unlikely(fr_event_corral(el, el->time(), true)) < 0) break;
2767  }
2768 
2769  /*
2770  * Give processes five seconds to exit.
2771  * This means any triggers that we may
2772  * have issued when the server exited
2773  * have a chance to complete.
2774  */
2776  el->dispatch = false;
2777 
2778  return el->exit;
2779 }
2780 
2781 /** Cleanup an event list
2782  *
2783  * Frees/destroys any resources associated with an event list
2784  *
2785  * @param[in] el to free resources for.
2786  */
/* NOTE(review): the function signature line (listing line 2787, the talloc
 * destructor set in fr_event_list_alloc()) was lost in extraction. */
2788 {
2789  fr_event_timer_t const *ev;
2790 
/* Cancel every remaining timer before tearing the list down. */
2791  while ((ev = fr_lst_peek(el->times)) != NULL) fr_event_timer_delete(&ev);
2792 
/* NOTE(review): listing line 2793 is missing here — confirm against the
 * upstream source before compiling. */
2794 
/* Free all child allocations (FD events, callbacks, etc.) before closing the kqueue. */
2795  talloc_free_children(el);
2796 
/* kq is initialised to -1 so the destructor is safe before kqueue() runs. */
2797  if (el->kq >= 0) close(el->kq);
2798 
2799  return 0;
2800 }
2801 
2802 /** Free any memory we allocated for indexes
2803  *
2804  */
2805 static int _event_free_indexes(UNUSED void *uctx)
2806 {
2807  unsigned int i;
2808 
2809  for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) if (talloc_free(filter_maps[i].ev_to_func) < 0) return -1;
2810  return 0;
2811 }
2812 
2813 static int _event_build_indexes(UNUSED void *uctx)
2814 {
2815  unsigned int i;
2816 
2817  for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) event_fd_func_index_build(&filter_maps[i]);
2818  return 0;
2819 }
2820 
2821 #ifdef EVFILT_LIBKQUEUE
2822 /** kqueue logging wrapper function
2823  *
2824  */
2825 static CC_HINT(format (printf, 1, 2)) CC_HINT(nonnull)
2826 void _event_kqueue_log(char const *fmt, ...)
2827 {
2828  va_list ap;
2829 
2830  va_start(ap, fmt);
2831  fr_vlog(&default_log, L_DBG, __FILE__, __LINE__, fmt, ap);
2832  va_end(ap);
2833 }
2834 
/** If we're building with libkqueue, and at debug level 4 or higher, enable libkqueue debugging output
 *
 * This requires a debug build of libkqueue
 */
static int _event_kqueue_logging(UNUSED void *uctx)
{
	struct kevent evset, out;

	log_conf_kq = kqueue();
	if (unlikely(log_conf_kq < 0)) {
		fr_strerror_const("Failed initialising logging configuration kqueue");
		return -1;
	}

	/*
	 *	Point libkqueue's debug output at our log wrapper.
	 *	A non-debug build of libkqueue rejects this, which
	 *	is non-fatal.
	 */
	EV_SET(&evset, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, (intptr_t)_event_kqueue_log, NULL);
	if (kevent(log_conf_kq, &evset, 1, &out, 1, &(struct timespec){}) != 1) {
		close(log_conf_kq);
		log_conf_kq = -1;
		return 1;
	}

	/*
	 *	At sufficiently high debug levels also turn on
	 *	libkqueue's own verbose logging.
	 */
	if (fr_debug_lvl >= L_DBG_LVL_3) {
		EV_SET(&evset, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG, 1, NULL);
		if (kevent(log_conf_kq, &evset, 1, &out, 1, &(struct timespec){}) != 1) {
			fr_strerror_const("Failed enabling libkqueue debug logging");
			close(log_conf_kq);
			log_conf_kq = -1;
			return -1;
		}
	}

	return 0;
}
2868 
static int _event_kqueue_logging_stop(UNUSED void *uctx)
{
	struct kevent evset, out;

	/* Unregister the debug log function before closing the kqueue. */
	EV_SET(&evset, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, 0, NULL);
	(void)kevent(log_conf_kq, &evset, 1, &out, 1, &(struct timespec){});

	close(log_conf_kq);
	log_conf_kq = -1;

	return 0;
}
2881 #endif
2882 
2883 /** Initialise a new event list
2884  *
2885  * @param[in] ctx to allocate memory in.
2886  * @param[in] status callback, called on each iteration of the event list.
2887  * @param[in] status_uctx context for the status callback
2888  * @return
2889  * - A pointer to a new event list on success (free with talloc_free).
2890  * - NULL on error.
2891  */
2892 fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
2893 {
/* NOTE(review): listing line 2894 (presumably the "fr_event_list_t *el;"
 * declaration used throughout this body) was lost in extraction. */
2895  struct kevent kev;
2896  int ret;
2897 
2898  /*
2899  * Build the map indexes the first time this
2900  * function is called.
2901  */
2902  fr_atexit_global_once_ret(&ret, _event_build_indexes, _event_free_indexes, NULL);
2903 #ifdef EVFILT_LIBKQUEUE
2904  fr_atexit_global_once_ret(&ret, _event_kqueue_logging, _event_kqueue_logging_stop, NULL);
2905 #endif
2906 
2907  el = talloc_zero(ctx, fr_event_list_t);
2908  if (!fr_cond_assert(el)) {
2909  fr_strerror_const("Out of memory");
2910  return NULL;
2911  }
2912  el->time = fr_time;
2913  el->kq = -1; /* So destructor can be used before kqueue() provides us with fd */
2914  talloc_set_destructor(el, _event_list_free);
2915 
/* NOTE(review): listing line 2916 (allocation of el->times, the timer lst)
 * was lost in extraction — the following error check references it. */
2917  if (!el->times) {
2918  fr_strerror_const("Failed allocating event lst");
2919  error:
2920  talloc_free(el);
2921  return NULL;
2922  }
2923 
/* NOTE(review): listing line 2924 (allocation of el->fds, the FD rbtree)
 * was lost in extraction — the following error check references it. */
2925  if (!el->fds) {
2926  fr_strerror_const("Failed allocating FD tree");
2927  goto error;
2928  }
2929 
2930  el->kq = kqueue();
2931  if (el->kq < 0) {
2932  fr_strerror_printf("Failed allocating kqueue: %s", fr_syserror(errno));
2933  goto error;
2934  }
2935 
/* NOTE(review): listing lines 2936-2940 were lost in extraction — presumably
 * the fr_dlist_talloc_init() calls for pre_callbacks, post_callbacks,
 * pid_to_reap, ev_to_add and fd_to_free used elsewhere in this file. */
2941  if (status) (void) fr_event_pre_insert(el, status, status_uctx);
2942 
2943  /*
2944  * Set our "exit" callback as ident 0.
2945  */
2946  EV_SET(&kev, 0, EVFILT_USER, EV_ADD | EV_CLEAR, NOTE_FFNOP, 0, NULL);
2947  if (kevent(el->kq, &kev, 1, NULL, 0, NULL) < 0) {
2948  fr_strerror_printf("Failed adding exit callback to kqueue: %s", fr_syserror(errno));
2949  goto error;
2950  }
2951 
2952 #ifdef WITH_EVENT_DEBUG
2953  fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, NULL);
2954 #endif
2955 
2956  return el;
2957 }
2958 
2959 /** Override event list time source
2960  *
2961  * @param[in] el to set new time function for.
2962  * @param[in] func to set.
2963  */
/* NOTE(review): the signature line (listing line 2964) was lost in
 * extraction; the body simply installs func as el's time source. */
2965 {
2966  el->time = func;
2967 }
2968 
2969 /** Return whether the event loop has any active events
2970  *
2971  */
/* NOTE(review): both the signature line (listing line 2972) and the body
 * statement (listing line 2974) were lost in extraction — restore from
 * upstream before compiling. */
2973 {
2975 }
2976 
2977 #ifdef WITH_EVENT_DEBUG
/* Bucket boundaries for the timer report: successive powers of ten in
 * nanoseconds, from 1ns up to 100Ms.  Must stay in lock-step with
 * decade_names below. */
2978 static const fr_time_delta_t decades[18] = {
2979  { 1 }, { 10 }, { 100 },
2980  { 1000 }, { 10000 }, { 100000 },
2981  { 1000000 }, { 10000000 }, { 100000000 },
2982  { 1000000000 }, { 10000000000 }, { 100000000000 },
2983  { 1000000000000 }, { 10000000000000 }, { 100000000000000 },
2984  { 1000000000000000 }, { 10000000000000000 }, { 100000000000000000 },
2985 };
2986 
/* Human-readable labels for the decades table above (same order/length). */
2987 static const char *decade_names[18] = {
2988  "1ns", "10ns", "100ns",
2989  "1us", "10us", "100us",
2990  "1ms", "10ms", "100ms",
2991  "1s", "10s", "100s",
2992  "1Ks", "10Ks", "100Ks",
2993  "1Ms", "10Ms", "100Ms", /* 1 year is 300Ms */
2994 };
2995 
/* Per allocation-site (file/line) count of pending timer events, stored
 * in an rbtree keyed by event_timer_location_cmp(). */
2996 typedef struct {
2997  fr_rb_node_t node;
2998  char const *file;
2999  int line;
3000  uint32_t count;
3001 } fr_event_counter_t;
3002 
3003 static int8_t event_timer_location_cmp(void const *one, void const *two)
3004 {
3005  fr_event_counter_t const *a = one;
3006  fr_event_counter_t const *b = two;
3007 
3008  CMP_RETURN(a, b, file);
3009 
3010  return CMP(a->line, b->line);
3011 }
3012 
3013 
3014 /** Print out information about the number of events in the event loop
3015  *
3016  */
3017 void fr_event_report(fr_event_list_t *el, fr_time_t now, void *uctx)
3018 {
3019  fr_lst_iter_t iter;
3020  fr_event_timer_t const *ev;
3021  size_t i;
3022 
3023  size_t array[NUM_ELEMENTS(decades)] = { 0 };
3024  fr_rb_tree_t *locations[NUM_ELEMENTS(decades)];
3025  TALLOC_CTX *tmp_ctx;
3026  static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;
3027 
3028  tmp_ctx = talloc_init_const("temporary stats");
3029  if (!tmp_ctx) {
3030  oom:
3031  EVENT_DEBUG("Can't do report, out of memory");
3032  talloc_free(tmp_ctx);
3033  return;
3034  }
3035 
3036  for (i = 0; i < NUM_ELEMENTS(decades); i++) {
3037  locations[i] = fr_rb_inline_alloc(tmp_ctx, fr_event_counter_t, node, event_timer_location_cmp, NULL);
3038  if (!locations[i]) goto oom;
3039  }
3040 
3041  /*
3042  * Show which events are due, when they're due,
3043  * and where they were allocated
3044  */
3045  for (ev = fr_lst_iter_init(el->times, &iter);
3046  ev != NULL;
3047  ev = fr_lst_iter_next(el->times, &iter)) {
3048  fr_time_delta_t diff = fr_time_sub(ev->when, now);
3049 
3050  for (i = 0; i < NUM_ELEMENTS(decades); i++) {
3051  if ((fr_time_delta_cmp(diff, decades[i]) <= 0) || (i == NUM_ELEMENTS(decades) - 1)) {
3052  fr_event_counter_t find = { .file = ev->file, .line = ev->line };
3053  fr_event_counter_t *counter;
3054 
3055  counter = fr_rb_find(locations[i], &find);
3056  if (!counter) {
3057  counter = talloc(locations[i], fr_event_counter_t);
3058  if (!counter) goto oom;
3059  counter->file = ev->file;
3060  counter->line = ev->line;
3061  counter->count = 1;
3062  fr_rb_insert(locations[i], counter);
3063  } else {
3064  counter->count++;
3065  }
3066 
3067  array[i]++;
3068  break;
3069  }
3070  }
3071  }
3072 
3073  pthread_mutex_lock(&print_lock);
3074  EVENT_DEBUG("%p - Event list stats", el);
3075  EVENT_DEBUG(" fd events : %"PRIu64, fr_event_list_num_fds(el));
3076  EVENT_DEBUG(" events last iter : %u", el->num_fd_events);
3077  EVENT_DEBUG(" num timer events : %"PRIu64, fr_event_list_num_timers(el));
3078 
3079  for (i = 0; i < NUM_ELEMENTS(decades); i++) {
3080  fr_rb_iter_inorder_t event_iter;
3081  void *node;
3082 
3083  if (!array[i]) continue;
3084 
3085  if (i == 0) {
3086  EVENT_DEBUG(" events <= %5s : %zu", decade_names[i], array[i]);
3087  } else if (i == (NUM_ELEMENTS(decades) - 1)) {
3088  EVENT_DEBUG(" events > %5s : %zu", decade_names[i - 1], array[i]);
3089  } else {
3090  EVENT_DEBUG(" events %5s - %5s : %zu", decade_names[i - 1], decade_names[i], array[i]);
3091  }
3092 
3093  for (node = fr_rb_iter_init_inorder(&event_iter, locations[i]);
3094  node;
3095  node = fr_rb_iter_next_inorder(&event_iter)) {
3096  fr_event_counter_t *counter = talloc_get_type_abort(node, fr_event_counter_t);
3097 
3098  EVENT_DEBUG(" : %u allocd at %s[%u]",
3099  counter->count, counter->file, counter->line);
3100  }
3101  }
3102  pthread_mutex_unlock(&print_lock);
3103 
3104  fr_event_timer_in(el, el, &el->report, fr_time_delta_from_sec(EVENT_REPORT_FREQ), fr_event_report, uctx);
3105  talloc_free(tmp_ctx);
3106 }
3107 
3108 #ifndef NDEBUG
3109 void fr_event_timer_dump(fr_event_list_t *el)
3110 {
3111  fr_lst_iter_t iter;
3112  fr_event_timer_t *ev;
3113  fr_time_t now;
3114 
3115  now = el->time();
3116 
3117  EVENT_DEBUG("Time is now %"PRId64"", fr_time_unwrap(now));
3118 
3119  for (ev = fr_lst_iter_init(el->times, &iter);
3120  ev;
3121  ev = fr_lst_iter_next(el->times, &iter)) {
3122  (void)talloc_get_type_abort(ev, fr_event_timer_t);
3123  EVENT_DEBUG("%s[%u]: %p time=%" PRId64 " (%c), callback=%p",
3124  ev->file, ev->line, ev, fr_time_unwrap(ev->when),
3125  fr_time_gt(now, ev->when) ? '<' : '>', ev->callback);
3126  }
3127 }
3128 #endif
3129 #endif
3130 
3131 #ifdef TESTING
3132 
3133 /*
3134  * cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event
3135  *
3136  * ./event
3137  *
3138  * And hit CTRL-S to stop the output, CTRL-Q to continue.
3139  * It normally alternates printing the time and sleeping,
3140  * but when you hit CTRL-S/CTRL-Q, you should see a number
3141  * of events run right after each other.
3142  *
3143  * OR
3144  *
3145  * valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./event
3146  */
3147 
3148 static void print_time(void *ctx)
3149 {
3150  fr_time_t when;
3151  int64_t usec;
3152 
3153  when = *(fr_time_t *) ctx;
3154  usec = fr_time_to_usec(when);
3155 
3156  printf("%d.%06d\n", usec / USEC, usec % USEC);
3157  fflush(stdout);
3158 }
3159 
/* ISAAC PRNG state used by event_rand() in this TESTING harness. */
3160 static fr_randctx rand_pool;
3161 
3162 static uint32_t event_rand(void)
3163 {
3164  uint32_t num;
3165 
3166  num = rand_pool.randrsl[rand_pool.randcnt++];
3167  if (rand_pool.randcnt == 256) {
3168  fr_isaac(&rand_pool);
3169  rand_pool.randcnt = 0;
3170  }
3171 
3172  return num;
3173 }
3174 
3175 
3176 #define MAX 100
3177 int main(int argc, char **argv)
3178 {
3179  int i, rcode;
3180  fr_time_t array[MAX];
3181  fr_time_t now, when;
/* NOTE(review): listing line 3182 (presumably "fr_event_list_t *el;") was
 * lost in extraction — el is used throughout this body. */
3183 
/* NOTE(review): fr_event_list_alloc() takes three arguments elsewhere in
 * this file (ctx, status, status_uctx); this TESTING harness looks stale —
 * confirm before building with -DTESTING. */
3184  el = fr_event_list_alloc(NULL, NULL);
3185  if (!el) fr_exit_now(1);
3186 
3187  memset(&rand_pool, 0, sizeof(rand_pool));
3188  rand_pool.randrsl[1] = time(NULL);
3189 
3190  fr_rand_init(&rand_pool, 1);
3191  rand_pool.randcnt = 0;
3192 
/* Schedule MAX-1 timers at monotonically increasing pseudo-random offsets. */
3193  array[0] = el->time();
3194  for (i = 1; i < MAX; i++) {
3195  array[i] = array[i - 1];
3196  array[i] += event_rand() & 0xffff;
3197 
3198  fr_event_timer_at(NULL, el, array[i], print_time, array[i]);
3199  }
3200 
/* Run timers until none remain, sleeping between firings. */
3201  while (fr_event_list_num_timers(el)) {
3202  now = el->time();
3203  when = now;
3204  if (!fr_event_timer_run(el, &when)) {
3205  int delay = (when - now) / 1000; /* nanoseconds to microseconds */
3206 
3207  printf("\tsleep %d microseconds\n", delay);
3208  fflush(stdout);
3209  usleep(delay);
3210  }
3211  }
3212 
3213  talloc_free(el);
3214 
3215  return 0;
3216 }
3217 #endif
int const char * file
Definition: acutest.h:702
va_end(args)
static int const char * fmt
Definition: acutest.h:573
int const char int line
Definition: acutest.h:702
va_start(args, fmt)
#define UNCONST(_type, _ptr)
Remove const qualification from a pointer.
Definition: build.h:165
#define RCSID(id)
Definition: build.h:444
#define DIAG_UNKNOWN_PRAGMAS
Definition: build.h:417
#define L(_str)
Helper for initialising arrays of string literals.
Definition: build.h:207
#define DIAG_ON(_x)
Definition: build.h:419
#define CMP_RETURN(_a, _b, _field)
Return if the comparison is not 0 (is unequal)
Definition: build.h:119
#define CMP(_a, _b)
Same as CMP_PREFER_SMALLER; use it when you don't care which specific ordering you get, only that the ordering is consistent.
Definition: build.h:110
#define unlikely(_x)
Definition: build.h:378
#define NDEBUG_LOCATION_VALS
Definition: build.h:262
#define NDEBUG_LOCATION_ARGS
Pass caller information to the function.
Definition: build.h:261
#define UNUSED
Definition: build.h:313
#define NUM_ELEMENTS(_t)
Definition: build.h:335
#define DIAG_OFF(_x)
Definition: build.h:418
static int kq
Definition: control_test.c:46
#define fr_cond_assert(_x)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition: debug.h:137
#define fr_assert_msg(_x, _msg,...)
Calls panic_action ifndef NDEBUG, else logs error and causes the server to exit immediately with code...
Definition: debug.h:208
#define fr_assert_fail(_msg,...)
Calls panic_action ifndef NDEBUG, else logs error.
Definition: debug.h:214
#define fr_cond_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition: debug.h:154
#define fr_exit_now(_x)
Exit without calling atexit() handlers, producing a log message in debug builds.
Definition: debug.h:232
int main(int argc, char **argv)
Definition: dhcpclient.c:521
static fr_time_delta_t timeout
Definition: dhcpclient.c:54
static void * fr_dlist_next(fr_dlist_head_t const *list_head, void const *ptr)
Get the next item in a list.
Definition: dlist.h:555
static bool fr_dlist_entry_in_list(fr_dlist_t const *entry)
Check if a list entry is part of a list.
Definition: dlist.h:163
static unsigned int fr_dlist_num_elements(fr_dlist_head_t const *head)
Return the number of elements in the dlist.
Definition: dlist.h:939
static void * fr_dlist_head(fr_dlist_head_t const *list_head)
Return the HEAD item of a list or NULL if the list is empty.
Definition: dlist.h:486
static int fr_dlist_insert_tail(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the tail of a list.
Definition: dlist.h:378
static void * fr_dlist_remove(fr_dlist_head_t *list_head, void *ptr)
Remove an item from the list.
Definition: dlist.h:638
#define fr_dlist_talloc_init(_head, _type, _field)
Initialise the head structure of a doubly linked list.
Definition: dlist.h:275
static int fr_dlist_insert_head(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the head of a list.
Definition: dlist.h:338
#define fr_dlist_foreach_safe(_list_head, _type, _iter)
Iterate over the contents of a list allowing for removals.
Definition: dlist.h:108
Head of a doubly linked list.
Definition: dlist.h:51
Entry in a doubly linked list.
Definition: dlist.h:41
#define fr_event_user_insert(_ctx, _ev_p, _el, _trigger, _callback, _uctx)
Definition: event.h:280
fr_event_io_func_t io
Read/write functions.
Definition: event.h:199
void(* fr_event_timer_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx)
Called when a timer event fires.
Definition: event.h:118
struct fr_event_user_s fr_event_user_t
An opaquer user event handle.
Definition: event.h:57
void(* fr_event_fd_cb_t)(fr_event_list_t *el, int fd, int flags, void *uctx)
Called when an IO event occurs on a file descriptor.
Definition: event.h:137
@ FR_EVENT_OP_SUSPEND
Temporarily remove the relevant filter from kevent.
Definition: event.h:69
@ FR_EVENT_OP_RESUME
Reinsert the filter into kevent.
Definition: event.h:70
fr_event_filter_t
The type of filter to install for an FD.
Definition: event.h:61
@ FR_EVENT_FILTER_VNODE
Filter for vnode subfilters.
Definition: event.h:63
@ FR_EVENT_FILTER_IO
Combined filter for read/write functions/.
Definition: event.h:62
size_t offset
Offset of function in func struct.
Definition: event.h:76
struct fr_event_pid fr_event_pid_t
An opaque PID status handle.
Definition: event.h:53
fr_event_fd_cb_t read
Callback for when data is available.
Definition: event.h:174
void(* fr_event_pid_cb_t)(fr_event_list_t *el, pid_t pid, int status, void *uctx)
Called when a child process has exited.
Definition: event.h:156
void(* fr_event_error_cb_t)(fr_event_list_t *el, int fd, int flags, int fd_errno, void *uctx)
Called when an IO error event occurs on a file descriptor.
Definition: event.h:147
fr_time_t(* fr_event_time_source_t)(void)
Alternative time source, useful for testing.
Definition: event.h:169
int(* fr_event_status_cb_t)(fr_time_t now, fr_time_delta_t wake, void *uctx)
Called after each event loop cycle.
Definition: event.h:128
fr_event_op_t op
Operation to perform on function/filter.
Definition: event.h:77
#define fr_event_timer_at(...)
Definition: event.h:250
#define fr_event_timer_in(...)
Definition: event.h:255
void(* fr_event_user_cb_t)(fr_event_list_t *el, void *uctx)
Called when a user kevent occurs.
Definition: event.h:163
Callbacks for the FR_EVENT_FILTER_IO filter.
Definition: event.h:173
Structure describing a modification to a filter's state.
Definition: event.h:75
Callbacks for the FR_EVENT_FILTER_VNODE filter.
Definition: event.h:180
Union of all filter functions.
Definition: event.h:198
free(array)
void fr_isaac(fr_randctx *ctx)
Definition: isaac.c:46
int fr_event_post_delete(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx)
Delete a post-event callback from the event list.
Definition: event.c:2328
void fr_event_service(fr_event_list_t *el)
Service any outstanding timer or file descriptor events.
Definition: event.c:2542
fr_dlist_head_t ev_to_add
dlist of events to add
Definition: event.c:440
static int _event_timer_free(fr_event_timer_t *ev)
Remove an event from the event loop.
Definition: event.c:1389
fr_event_list_t * fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
Initialise a new event list.
Definition: event.c:2892
static fr_event_func_map_t filter_maps[]
Definition: event.c:172
static int8_t fr_event_timer_cmp(void const *a, void const *b)
Compare two timer events to see which one should occur first.
Definition: event.c:569
static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
Discover the type of a file descriptor.
Definition: event.c:804
fr_dlist_t entry
List of deferred timer events.
Definition: event.c:114
fr_event_func_map_entry_t * func_to_ev
Function -> Event maps coalesced, out of order.
Definition: event.c:168
fr_event_error_cb_t error
Callback for when an error occurs on the FD.
Definition: event.c:310
char const * file
Source file this event was last updated in.
Definition: event.c:352
static int8_t fr_event_fd_cmp(void const *one, void const *two)
Compare two file descriptor handles.
Definition: event.c:582
fr_event_pid_cb_t callback
callback to run when the child exits
Definition: event.c:368
fr_event_funcs_t stored
Stored (set, but inactive) filter functions.
Definition: event.c:308
static ssize_t fr_event_build_evset(UNUSED fr_event_list_t *el, struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active, fr_event_fd_t *ef, fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
Build a new evset based on function pointers present.
Definition: event.c:671
fr_dlist_head_t pid_to_reap
A list of all orphaned child processes we're waiting to reap.
Definition: event.c:431
bool is_registered
Whether this fr_event_fd_t's FD has been registered with kevent.
Definition: event.c:314
fr_rb_tree_t * fds
Tree used to track FDs with filters in kqueue.
Definition: event.c:413
char const * file
Source file this event was last updated in.
Definition: event.c:327
fr_time_t fr_event_list_time(fr_event_list_t *el)
Get the current server time according to the event list.
Definition: event.c:636
int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Delete a pre-event callback from the event list.
Definition: event.c:2274
fr_time_t fr_event_timer_when(fr_event_timer_t const *ev)
Internal timestamp representing when the timer should fire.
Definition: event.c:1626
void fr_event_list_set_time_func(fr_event_list_t *el, fr_event_time_source_t func)
Override event list time source.
Definition: event.c:2964
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:333
static void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
Saves some boilerplate...
Definition: event.c:1910
int line
Line this event was last updated on.
Definition: event.c:386
static int _event_fd_delete(fr_event_fd_t *ef)
Remove a file descriptor from the event loop and rbtree but don't explicitly free it.
Definition: event.c:861
int _fr_event_pid_reap(NDEBUG_LOCATION_ARGS fr_event_list_t *el, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Asynchronously wait for a PID to exit, then reap it.
Definition: event.c:1961
fr_event_filter_t filter
Definition: event.c:300
fr_dlist_head_t pre_callbacks
callbacks when we may be idle...
Definition: event.c:428
#define FR_EVENT_FD_PCAP
Definition: event.c:145
void * uctx
Context pointer to pass to each file descriptor callback.
Definition: event.c:369
fr_event_status_cb_t callback
The callback to call.
Definition: event.c:395
fr_event_timer_cb_t callback
The callback to call.
Definition: event.c:404
static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
Does the actual reaping of PIDs.
Definition: event.c:1918
int line
Line this event was last updated on.
Definition: event.c:120
int line
Line this event was last updated on.
Definition: event.c:353
static size_t kevent_filter_table_len
Definition: event.c:93
fr_dlist_head_t post_callbacks
post-processing callbacks
Definition: event.c:429
int num_fd_events
Number of events in this event list.
Definition: event.c:424
fr_event_timer_cb_t callback
Callback to execute when the timer fires.
Definition: event.c:105
int _fr_event_user_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p, bool trigger, fr_event_user_cb_t callback, void *uctx)
Add a user callback to the event list.
Definition: event.c:2180
fr_event_fd_type_t type
Type of events we're interested in.
Definition: event.c:303
static fr_table_num_sorted_t const fr_event_fd_type_table[]
Definition: event.c:283
static size_t fr_event_fd_type_table_len
Definition: event.c:289
int _fr_event_timer_in(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p, fr_time_delta_t delta, fr_event_timer_cb_t callback, void const *uctx)
Insert a timer event into an event list.
Definition: event.c:1589
uint16_t flags
Flags to use for inserting event.
Definition: event.c:159
waitpid(reap->pid_ev->pid, &status, 0)
fr_event_pid_cb_t callback
callback to run when the child exits
Definition: event.c:341
static int _event_list_free(fr_event_list_t *el)
Cleanup an event list.
Definition: event.c:2787
bool coalesce
Coalesce this map with the next.
Definition: event.c:162
fr_dlist_t entry
Entry in free list.
Definition: event.c:320
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
Gather outstanding timer and file descriptor events.
Definition: event.c:2407
static int _event_free_indexes(UNUSED void *uctx)
Free any memory we allocated for indexes.
Definition: event.c:2805
fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
Returns the appropriate callback function for a given event.
Definition: event.c:1311
void * uctx
Context for the callback.
Definition: event.c:405
bool is_registered
Whether this user event has been registered with the event loop.
Definition: event.c:378
void const * uctx
Context pointer to pass to the callback.
Definition: event.c:106
return processed
Definition: event.c:2122
int type
Type this filter applies to.
Definition: event.c:161
uint64_t fr_event_list_num_timers(fr_event_list_t *el)
Return the number of timer events currently scheduled.
Definition: event.c:606
fr_event_func_map_t const * map
Function map between fr_event_funcs_t and kevent filters.
Definition: event.c:312
void * uctx
Context for the callback.
Definition: event.c:396
int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Insert a PID event into an event list.
Definition: event.c:1734
char const * name
Name of the event.
Definition: event.c:157
int fr_event_user_trigger(fr_event_list_t *el, fr_event_user_t *ev)
Trigger a user event.
Definition: event.c:2226
char const * file
Source file this event was last updated in.
Definition: event.c:119
void * fr_event_fd_uctx(fr_event_fd_t *ef)
Returns the uctx associated with an fr_event_fd_t handle.
Definition: event.c:1319
int line
Line this event was last updated on.
Definition: event.c:328
uintptr_t armour
protection flag from being deleted.
Definition: event.c:323
struct fr_event_pid::@120 early_exit
Fields that are only used if we're being triggered by a user event.
int kq
instance associated with this event list.
Definition: event.c:426
fr_event_user_cb_t callback
The callback to call.
Definition: event.c:381
talloc_free(reap)
int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Unarmour an FD.
Definition: event.c:1365
int sock_type
The type of socket SOCK_STREAM, SOCK_RAW etc...
Definition: event.c:305
uint64_t fr_event_list_num_fds(fr_event_list_t *el)
Return the number of file descriptors is_registered with this event loop.
Definition: event.c:594
void * uctx
Context pointer to pass to each file descriptor callback.
Definition: event.c:317
fr_event_func_idx_type_t idx_type
What type of index we use for event to function mapping.
Definition: event.c:166
fr_event_timer_t const ** parent
A pointer to the parent structure containing the timer event.
Definition: event.c:110
#define GET_FUNC(_ef, _offset)
static fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
Figure out which function to call given a kevent.
Definition: event.c:522
static int _fr_event_reap_free(fr_event_pid_reap_t *reap)
Definition: event.c:1931
pid_t pid
child to wait for
Definition: event.c:338
bool in_handler
Deletes should be deferred until after the handlers complete.
Definition: event.c:436
static void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
Evaluate a EVFILT_PROC event.
Definition: event.c:1656
int fr_event_list_kq(fr_event_list_t *el)
Return the kq associated with an event list.
Definition: event.c:618
fr_time_t now
The last time the event list was serviced.
Definition: event.c:421
#define fr_time()
Definition: event.c:60
void * uctx
Context for the callback.
Definition: event.c:382
bool is_registered
Whether this user event has been registered with the event loop.
Definition: event.c:335
int fr_event_timer_delete(fr_event_timer_t const **ev_p)
Delete a timer event from the event list.
Definition: event.c:1604
int fr_event_post_insert(fr_event_list_t *el, fr_event_timer_cb_t callback, void *uctx)
Add a post-event callback to the event list.
Definition: event.c:2306
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:376
bool fr_event_list_empty(fr_event_list_t *el)
Return whether the event loop has any active events.
Definition: event.c:2972
static int _event_build_indexes(UNUSED void *uctx)
Definition: event.c:2813
fr_lst_index_t lst_id
Where to store opaque lst data.
Definition: event.c:113
unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal)
Send a signal to all the processes we have in our reap list, and reap them.
Definition: event.c:1997
int16_t filter
Filter to apply.
Definition: event.c:158
static void event_fd_func_index_build(fr_event_func_map_t *map)
Definition: event.c:447
static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
Placeholder callback to avoid branches in service loop.
Definition: event.c:650
fr_dlist_t entry
If the fr_event_pid is in the detached, reap state, it's inserted into a list associated with the eve...
Definition: event.c:363
bool fr_event_loop_exiting(fr_event_list_t *el)
Check to see whether the event loop is in the process of exiting.
Definition: event.c:2748
fr_dlist_t entry
Linked list of callback.
Definition: event.c:394
int _fr_event_filter_update(NDEBUG_LOCATION_ARGS fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
Suspend/resume a subset of filters.
Definition: event.c:993
char const * file
Source file this event was last updated in.
Definition: event.c:385
int _fr_event_fd_move(NDEBUG_LOCATION_ARGS fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
Move a file descriptor event from one event list to another.
Definition: event.c:942
fr_event_func_map_entry_t ** ev_to_func
Function -> Event maps in index order.
Definition: event.c:169
int _fr_event_fd_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_fd_cb_t read_fn, fr_event_fd_cb_t write_fn, fr_event_error_cb_t error, void *uctx)
Associate I/O callbacks with a file descriptor.
Definition: event.c:1226
fr_event_fd_type_t
Definition: event.c:124
@ FR_EVENT_FD_FILE
is a file.
Definition: event.c:126
@ FR_EVENT_FD_DIRECTORY
is a directory.
Definition: event.c:127
@ FR_EVENT_FD_SOCKET
is a socket.
Definition: event.c:125
fr_event_pid_t const * pid_ev
pid_ev this reaper is bound to.
Definition: event.c:361
fr_event_funcs_t active
Active filter functions.
Definition: event.c:307
int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Add a pre-event callback to the event list.
Definition: event.c:2252
int fr_event_timer_run(fr_event_list_t *el, fr_time_t *when)
Run a single scheduled timer event.
Definition: event.c:2356
static void _fr_event_pid_early_exit(fr_event_list_t *el, void *uctx)
Called on the next loop through the event loop when inserting an EVFILT_PROC event fails.
Definition: event.c:1703
static void event_user_eval(fr_event_list_t *el, struct kevent *kev)
Definition: event.c:2152
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:299
static fr_table_num_sorted_t const kevent_filter_table[]
Definition: event.c:76
TALLOC_CTX * linked_ctx
talloc ctx this event was bound to.
Definition: event.c:318
static void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
Definition: event.c:2529
void fr_event_loop_exit(fr_event_list_t *el, int code)
Signal an event loop exit with the specified code.
Definition: event.c:2737
#define FR_EV_BATCH_FDS
Definition: event.c:57
void * uctx
Context pointer to pass to each file descriptor callback.
Definition: event.c:342
static int _event_pid_free(fr_event_pid_t *ev)
Remove PID wait event from kevent if the fr_event_pid_t is freed.
Definition: event.c:1636
fr_event_list_t * el
Event list this event belongs to.
Definition: event.c:360
int fd
File descriptor we're listening for events on.
Definition: event.c:301
size_t offset
Offset of function pointer in structure.
Definition: event.c:156
int will_exit
Will exit on next call to fr_event_corral.
Definition: event.c:415
int fr_event_fd_delete(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Remove a file descriptor from the event loop.
Definition: event.c:1253
fr_dlist_t entry
Linked list of callback.
Definition: event.c:403
int fr_event_loop(fr_event_list_t *el)
Run an event loop.
Definition: event.c:2759
#define EVENT_DEBUG(...)
Definition: event.c:73
int _fr_event_timer_at(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_timer_t const **ev_p, fr_time_t when, fr_event_timer_cb_t callback, void const *uctx)
Insert a timer event into an event list.
Definition: event.c:1444
int exit
If non-zero event loop will prevent the addition of new events, and will return immediately from the ...
Definition: event.c:416
fr_rb_node_t node
Entry in the tree of file descriptor handles.
Definition: event.c:295
fr_event_time_source_t time
Where our time comes from.
Definition: event.c:420
fr_dlist_head_t fd_to_free
File descriptor events pending deletion.
Definition: event.c:439
bool dispatch
Whether the event list is currently dispatching events.
Definition: event.c:422
struct kevent events[FR_EV_BATCH_FDS]
Definition: event.c:434
int _fr_event_filter_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_filter_t filter, void *funcs, fr_event_error_cb_t error, void *uctx)
Insert a filter for the specified fd.
Definition: event.c:1070
#define NOTE_EXITSTATUS
TALLOC_CTX * linked_ctx
talloc ctx this event was bound to.
Definition: event.c:108
fr_event_fd_t * fr_event_fd_handle(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Get the opaque event handle from a file descriptor.
Definition: event.c:1289
fr_event_pid_t const ** parent
Definition: event.c:339
fr_lst_t * times
of timer events to be executed.
Definition: event.c:412
static int _event_user_delete(fr_event_user_t *ev)
Memory will not be freed if we fail to remove the event from the kqueue.
Definition: event.c:2134
fr_time_t when
When this timer should fire.
Definition: event.c:103
fr_event_func_idx_type_t
Definition: event.c:134
@ FR_EVENT_FUNC_IDX_FILTER
Sign flip is performed i.e. -1 = 0The filter is used / as the index in the ev to func index.
Definition: event.c:137
@ FR_EVENT_FUNC_IDX_NONE
Definition: event.c:135
@ FR_EVENT_FUNC_IDX_FFLAGS
The bit position of the flags in FFLAGS is used to provide the index.
Definition: event.c:139
int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Armour an FD.
Definition: event.c:1335
fr_event_list_t * el
Event list containing this timer.
Definition: event.c:116
uint32_t fflags
fflags to pass to filter.
Definition: event.c:160
A file descriptor/filter event.
Definition: event.c:294
Specifies a mapping between a function pointer in a structure and its respective event.
Definition: event.c:155
Stores all information relating to an event list.
Definition: event.c:411
Hold additional information for automatically reaped PIDs.
Definition: event.c:359
Callbacks to perform after all timers and FDs have been checked.
Definition: event.c:402
Callbacks to perform when the event handler is about to check the events.
Definition: event.c:393
A timer event.
Definition: event.c:102
Callbacks for kevent() user events.
Definition: event.c:375
int fr_debug_lvl
Definition: log.c:42
fr_log_t default_log
Definition: log.c:290
void fr_vlog(fr_log_t const *log, fr_log_type_t type, char const *file, int line, char const *fmt, va_list ap)
Send a server log message to its destination.
Definition: log.c:343
@ L_DBG_LVL_3
3rd highest priority debug messages (-xxx | -Xx).
Definition: log.h:72
@ L_DBG
Only displayed when debugging is enabled.
Definition: log.h:59
int fr_lst_extract(fr_lst_t *lst, void *data)
Remove an element from an LST.
Definition: lst.c:715
void * fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter)
Get the next entry in an LST.
Definition: lst.c:785
int fr_lst_insert(fr_lst_t *lst, void *data)
Definition: lst.c:731
unsigned int fr_lst_num_elements(fr_lst_t *lst)
Definition: lst.c:750
void * fr_lst_peek(fr_lst_t *lst)
Definition: lst.c:701
void * fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter)
Iterate over entries in LST.
Definition: lst.c:766
Definition: lst.c:60
#define fr_lst_talloc_alloc(_ctx, _cmp, _talloc_type, _field, _init)
Creates an LST that verifies elements are of a specific talloc type.
Definition: lst.h:80
fr_lst_index_t fr_lst_iter_t
Definition: lst.h:45
unsigned int fr_lst_index_t
Definition: lst.h:43
static uint8_t fr_high_bit_pos(uint64_t num)
Find the highest order high bit in an unsigned 64 bit integer.
Definition: math.h:36
unsigned short uint16_t
Definition: merged_model.c:31
unsigned int uint32_t
Definition: merged_model.c:33
long int ssize_t
Definition: merged_model.c:24
unsigned char uint8_t
Definition: merged_model.c:30
unsigned long int size_t
Definition: merged_model.c:25
static size_t array[MY_ARRAY_SIZE]
void fr_rand_init(void)
Definition: rand.c:34
uint32_t randrsl[256]
Definition: rand.h:40
uint32_t randcnt
Definition: rand.h:39
uint32_t fr_rb_num_elements(fr_rb_tree_t *tree)
Return how many nodes there are in a tree.
Definition: rb.c:775
void * fr_rb_iter_next_inorder(fr_rb_iter_inorder_t *iter)
Return the next node.
Definition: rb.c:844
bool fr_rb_insert(fr_rb_tree_t *tree, void const *data)
Insert data into a tree.
Definition: rb.c:624
bool fr_rb_delete(fr_rb_tree_t *tree, void const *data)
Remove node and free data (if a free function was specified)
Definition: rb.c:736
void * fr_rb_iter_init_inorder(fr_rb_iter_inorder_t *iter, fr_rb_tree_t *tree)
Initialise an in-order iterator.
Definition: rb.c:818
void * fr_rb_find(fr_rb_tree_t const *tree, void const *data)
Find an element in the tree, returning the data, not the node.
Definition: rb.c:576
#define fr_rb_inline_talloc_alloc(_ctx, _type, _field, _data_cmp, _data_free)
Allocs a red black that verifies elements are of a specific talloc type.
Definition: rb.h:246
#define fr_rb_inline_alloc(_ctx, _type, _field, _data_cmp, _data_free)
Allocs a red black tree.
Definition: rb.h:271
Iterator structure for in-order traversal of an rbtree.
Definition: rb.h:321
The main red black tree structure.
Definition: rb.h:73
return count
Definition: module.c:175
fr_assert(0)
char const * fr_syserror(int num)
Guaranteed to be thread-safe version of strerror.
Definition: syserror.c:243
#define fr_table_str_by_value(_table, _number, _def)
Convert an integer to a string.
Definition: table.h:253
An element in a lexicographically sorted array of name to num mappings.
Definition: table.h:45
int talloc_link_ctx(TALLOC_CTX *parent, TALLOC_CTX *child)
Link two different parent and child contexts, so the child is freed before the parent.
Definition: talloc.c:167
static TALLOC_CTX * talloc_init_const(char const *name)
Allocate a top level chunk with a constant name.
Definition: talloc.h:112
#define fr_time_delta_to_timespec(_delta)
Convert a delta to a timespec.
Definition: time.h:664
static int8_t fr_time_delta_cmp(fr_time_delta_t a, fr_time_delta_t b)
Compare two fr_time_delta_t values.
Definition: time.h:928
static int64_t fr_time_unwrap(fr_time_t time)
Definition: time.h:146
static fr_time_delta_t fr_time_delta_from_sec(int64_t sec)
Definition: time.h:588
#define fr_time_delta_wrap(_time)
Definition: time.h:152
#define fr_time_wrap(_time)
Definition: time.h:145
#define fr_time_lteq(_a, _b)
Definition: time.h:240
#define fr_time_delta_ispos(_a)
Definition: time.h:288
static int64_t fr_time_to_usec(fr_time_t when)
Convert an fr_time_t (internal time) to number of usec since the unix epoch (wallclock time)
Definition: time.h:699
#define fr_time_add(_a, _b)
Add a time/time delta together.
Definition: time.h:196
#define fr_time_gt(_a, _b)
Definition: time.h:237
#define USEC
Definition: time.h:378
#define fr_time_sub(_a, _b)
Subtract one time from another.
Definition: time.h:229
static int8_t fr_time_cmp(fr_time_t a, fr_time_t b)
Compare two fr_time_t values.
Definition: time.h:914
A time delta, a difference in time measured in nanoseconds.
Definition: time.h:80
"server local" time.
Definition: time.h:69
close(uq->fd)
static fr_event_list_t * el
void fr_strerror_clear(void)
Clears all pending messages from the talloc pools.
Definition: strerror.c:577
char const * fr_strerror(void)
Get the last library error.
Definition: strerror.c:554
#define fr_strerror_printf(_fmt,...)
Log to thread local error buffer.
Definition: strerror.h:64
#define fr_strerror_printf_push(_fmt,...)
Add a message to an existing stack of messages at the tail.
Definition: strerror.h:84
#define fr_strerror_const_push(_msg)
Definition: strerror.h:227
#define fr_strerror_const(_msg)
Definition: strerror.h:223
int nonnull(2, 5))
int format(printf, 5, 0))
static size_t char ** out
Definition: value.h:984