The FreeRADIUS server $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
Loading...
Searching...
No Matches
event.c
Go to the documentation of this file.
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15 */
16
17/** Wrapper around libkqueue to make managing events easier
18 *
19 * Non-thread-safe event handling specific to FreeRADIUS.
20 *
21 * By non-thread-safe we mean multiple threads can't insert/delete
22 * events concurrently into the same event list without synchronization.
23 *
24 * @file src/lib/util/event.c
25 *
26 * @copyright 2007-2016 The FreeRADIUS server project
27 * @copyright 2016 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
28 * @copyright 2007 Alan DeKok (aland@freeradius.org)
29 */
30RCSID("$Id: e4a2c3bb589a2bcda1b3b19ff8679cff574e1eb3 $")
31
32#define _EVENT_LIST_PRIVATE 1
34
35#include <freeradius-devel/util/dlist.h>
36#include <freeradius-devel/util/event.h>
37#include <freeradius-devel/util/timer.h>
38#include <freeradius-devel/util/log.h>
39#include <freeradius-devel/util/rb.h>
40#include <freeradius-devel/util/strerror.h>
41#include <freeradius-devel/util/syserror.h>
42#include <freeradius-devel/util/table.h>
43#include <freeradius-devel/util/token.h>
44#include <freeradius-devel/util/atexit.h>
45
46#include <sys/stat.h>
47#include <sys/wait.h>
48#include <pthread.h>
49
50#ifdef NDEBUG
51/*
52 * Turn off documentation warnings as file/line
53 * args aren't used for non-debug builds.
54 */
56DIAG_OFF(documentation)
58#endif
59
60#define FR_EV_BATCH_FDS (256)
61
62DIAG_OFF(unused-macros)
63#define fr_time() static_assert(0, "Use el->time for event loop timing")
64DIAG_ON(unused-macros)
65
66#if !defined(SO_GET_FILTER) && defined(SO_ATTACH_FILTER)
67# define SO_GET_FILTER SO_ATTACH_FILTER
68#endif
69
71#ifdef EVFILT_AIO
72 { L("EVFILT_AIO"), EVFILT_AIO },
73#endif
74#ifdef EVFILT_EXCEPT
75 { L("EVFILT_EXCEPT"), EVFILT_EXCEPT },
76#endif
77#ifdef EVFILT_MACHPORT
78 { L("EVFILT_MACHPORT"), EVFILT_MACHPORT },
79#endif
80 { L("EVFILT_PROC"), EVFILT_PROC },
81 { L("EVFILT_READ"), EVFILT_READ },
82 { L("EVFILT_SIGNAL"), EVFILT_SIGNAL },
83 { L("EVFILT_TIMER"), EVFILT_TIMER },
84 { L("EVFILT_VNODE"), EVFILT_VNODE },
85 { L("EVFILT_WRITE"), EVFILT_WRITE }
86};
88
89#ifdef EVFILT_LIBKQUEUE
90static int log_conf_kq;
91#endif
92
93typedef enum {
94 FR_EVENT_FD_SOCKET = 1, //!< is a socket.
95 FR_EVENT_FD_FILE = 2, //!< is a file.
96 FR_EVENT_FD_DIRECTORY = 4, //!< is a directory.
97
98#ifdef SO_GET_FILTER
100#endif
102
103typedef enum {
105
 106	FR_EVENT_FUNC_IDX_FILTER,	//!< Sign flip is performed i.e. -1 = 0. The filter is used
 107					///< as the index in the ev to func index.
108 FR_EVENT_FUNC_IDX_FFLAGS //!< The bit position of the flags in FFLAGS
109 ///< is used to provide the index.
110 ///< i.e. 0x01 -> 0, 0x02 -> 1, 0x08 -> 3 etc..
112
113#ifndef SO_GET_FILTER
114# define FR_EVENT_FD_PCAP 0
115#endif
116
117/** Specifies a mapping between a function pointer in a structure and its respective event
118 *
119 * If the function pointer at the specified offset is set, then a matching event
120 * will be added.
121 *
122 * If the function pointer is NULL, then any existing events will be removed.
123 */
124typedef struct {
125 size_t offset; //!< Offset of function pointer in structure.
126 char const *name; //!< Name of the event.
127 int16_t filter; //!< Filter to apply.
128 uint16_t flags; //!< Flags to use for inserting event.
129 uint32_t fflags; //!< fflags to pass to filter.
130 int type; //!< Type this filter applies to.
131 bool coalesce; //!< Coalesce this map with the next.
133
134typedef struct {
135 fr_event_func_idx_type_t idx_type; //!< What type of index we use for
136 ///< event to function mapping.
137 fr_event_func_map_entry_t *func_to_ev; //!< Function -> Event maps coalesced, out of order.
138 fr_event_func_map_entry_t **ev_to_func; //!< Function -> Event maps in index order.
140
144 .func_to_ev = (fr_event_func_map_entry_t[]){
145 {
146 .offset = offsetof(fr_event_io_func_t, read),
147 .name = "read",
148 .filter = EVFILT_READ,
149 .flags = EV_ADD | EV_ENABLE,
150#ifdef NOTE_NONE
151 .fflags = NOTE_NONE,
152#else
153 .fflags = 0,
154#endif
156 },
157 {
158 .offset = offsetof(fr_event_io_func_t, write),
159 .name = "write",
160 .filter = EVFILT_WRITE,
161 .flags = EV_ADD | EV_ENABLE,
162 .fflags = 0,
164 },
165 { 0 }
166 }
167 },
169 .idx_type = FR_EVENT_FUNC_IDX_FFLAGS,
170 .func_to_ev = (fr_event_func_map_entry_t[]){
171 {
172 .offset = offsetof(fr_event_vnode_func_t, delete),
173 .name = "delete",
174 .filter = EVFILT_VNODE,
175 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
176 .fflags = NOTE_DELETE,
178 .coalesce = true
179 },
180 {
181 .offset = offsetof(fr_event_vnode_func_t, write),
182 .name = "write",
183 .filter = EVFILT_VNODE,
184 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
185 .fflags = NOTE_WRITE,
186 .type = FR_EVENT_FD_FILE,
187 .coalesce = true
188 },
189 {
190 .offset = offsetof(fr_event_vnode_func_t, extend),
191 .name = "extend",
192 .filter = EVFILT_VNODE,
193 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
194 .fflags = NOTE_EXTEND,
196 .coalesce = true
197 },
198 {
199 .offset = offsetof(fr_event_vnode_func_t, attrib),
200 .name = "attrib",
201 .filter = EVFILT_VNODE,
202 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
203 .fflags = NOTE_ATTRIB,
204 .type = FR_EVENT_FD_FILE,
205 .coalesce = true
206 },
207 {
208 .offset = offsetof(fr_event_vnode_func_t, link),
209 .name = "link",
210 .filter = EVFILT_VNODE,
211 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
212 .fflags = NOTE_LINK,
213 .type = FR_EVENT_FD_FILE,
214 .coalesce = true
215 },
216 {
217 .offset = offsetof(fr_event_vnode_func_t, rename),
218 .name = "rename",
219 .filter = EVFILT_VNODE,
220 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
221 .fflags = NOTE_RENAME,
222 .type = FR_EVENT_FD_FILE,
223 .coalesce = true
224 },
225#ifdef NOTE_REVOKE
226 {
227 .offset = offsetof(fr_event_vnode_func_t, revoke),
228 .name = "revoke",
229 .filter = EVFILT_VNODE,
230 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
231 .fflags = NOTE_REVOKE,
232 .type = FR_EVENT_FD_FILE,
233 .coalesce = true
234 },
235#endif
236#ifdef NOTE_FUNLOCK
237 {
238 .offset = offsetof(fr_event_vnode_func_t, funlock),
239 .name = "funlock",
240 .filter = EVFILT_VNODE,
241 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
242 .fflags = NOTE_FUNLOCK,
243 .type = FR_EVENT_FD_FILE,
244 .coalesce = true
245 },
246#endif
247 { 0 }
248 }
249 }
250};
251
253 { L("directory"), FR_EVENT_FD_DIRECTORY },
254 { L("file"), FR_EVENT_FD_FILE },
255 { L("pcap"), FR_EVENT_FD_PCAP },
256 { L("socket"), FR_EVENT_FD_SOCKET }
257};
259
260/** A file descriptor/filter event
261 *
262 */
264 fr_rb_node_t node; //!< Entry in the tree of file descriptor handles.
265 ///< this should really go away and we should pass around
266 ///< handles directly.
267
268 fr_event_list_t *el; //!< Event list this event belongs to.
270 int fd; //!< File descriptor we're listening for events on.
271
272 fr_event_fd_type_t type; //!< Type of events we're interested in.
273
274 int sock_type; //!< The type of socket SOCK_STREAM, SOCK_RAW etc...
275
276 fr_event_funcs_t active; //!< Active filter functions.
277 fr_event_funcs_t stored; //!< Stored (set, but inactive) filter functions.
278
279 fr_event_error_cb_t error; //!< Callback for when an error occurs on the FD.
280
281 fr_event_func_map_t const *map; //!< Function map between #fr_event_funcs_t and kevent filters.
282
283 bool is_registered; //!< Whether this fr_event_fd_t's FD has been registered with
284 ///< kevent. Mostly for debugging.
285
286 void *uctx; //!< Context pointer to pass to each file descriptor callback.
287 TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
288
289 fr_dlist_t entry; //!< Entry in free list.
290
291#ifndef NDEBUG
292 uintptr_t armour; //!< protection flag from being deleted.
293#endif
294
295#ifndef NDEBUG
296 char const *file; //!< Source file this event was last updated in.
297 int line; //!< Line this event was last updated on.
298#endif
299};
300
302 fr_event_list_t *el; //!< Event list this event belongs to.
303
304 bool is_registered; //!< Whether this user event has been registered
305 ///< with the event loop.
306
307 pid_t pid; //!< child to wait for
309
310 fr_event_pid_cb_t callback; //!< callback to run when the child exits
311 void *uctx; //!< Context pointer to pass to each file descriptor callback.
312
313 /** Fields that are only used if we're being triggered by a user event
314 */
315 struct {
316 fr_event_user_t *ev; //!< Fallback user event we use to raise a PID event when
317 ///< a race occurs with kevent.
318 int status; //!< Status we got from waitid.
320#ifndef NDEBUG
321 char const *file; //!< Source file this event was last updated in.
322 int line; //!< Line this event was last updated on.
323#endif
324};
325
326/** Hold additional information for automatically reaped PIDs
327 */
328typedef struct {
329 fr_event_list_t *el; //!< Event list this event belongs to.
330 fr_event_pid_t const *pid_ev; //!< pid_ev this reaper is bound to.
331
332 fr_dlist_t entry; //!< If the fr_event_pid is in the detached, reap state,
333 ///< it's inserted into a list associated with the event.
 334						///< We then send SIGKILL, and forcefully reap the process
335 ///< on exit.
336
337 fr_event_pid_cb_t callback; //!< callback to run when the child exits
338 void *uctx; //!< Context pointer to pass to each file descriptor callback.
340
341/** Callbacks for kevent() user events
342 *
343 */
345 fr_event_list_t *el; //!< Event list this event belongs to.
346
347 bool is_registered; //!< Whether this user event has been registered
348 ///< with the event loop.
349
350 fr_event_user_cb_t callback; //!< The callback to call.
351 void *uctx; //!< Context for the callback.
352
353#ifndef NDEBUG
354 char const *file; //!< Source file this event was last updated in.
355 int line; //!< Line this event was last updated on.
356#endif
357};
358
359/** Callbacks to perform when the event handler is about to check the events
360 *
361 */
362typedef struct {
363 fr_dlist_t entry; //!< Linked list of callback.
364 fr_event_status_cb_t callback; //!< The callback to call.
365 void *uctx; //!< Context for the callback.
367
368/** Callbacks to perform after all timers and FDs have been checked
369 *
370 */
371typedef struct {
372 fr_dlist_t entry; //!< Linked list of callback.
373 fr_event_post_cb_t callback; //!< The callback to call.
374 void *uctx; //!< Context for the callback.
376
377/** Stores all information relating to an event list
378 *
379 */
381 struct fr_event_list_pub_s pub; //!< Next event list in the chain.
382 fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue.
383
384 int will_exit; //!< Will exit on next call to fr_event_corral.
385 int exit; //!< If non-zero event loop will prevent the addition
386 ///< of new events, and will return immediately
387 ///< from the corral/service function.
388
389 bool dispatch; //!< Whether the event list is currently dispatching events.
390
391 int num_fd_events; //!< Number of events in this event list.
392
393 int kq; //!< instance associated with this event list.
394
395 fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle...
396 fr_dlist_head_t post_callbacks; //!< post-processing callbacks
397
398 fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're
399 ///< waiting to reap.
400
401 struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */
402
403 bool in_handler; //!< Deletes should be deferred until after the
404 ///< handlers complete.
405
406 fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion.
407
408#ifdef WITH_EVENT_DEBUG
409 fr_timer_t *report; //!< Report event.
410#endif
411};
412
414{
415 switch (map->idx_type) {
416 default:
417 return;
418
419 /*
420 * - Figure out the lowest filter value
421 * - Invert it
422 * - Allocate an array
423 * - Populate the array
424 */
426 {
427 int low = 0;
429
430 for (entry = map->func_to_ev; entry->name; entry++) if (entry->filter < low) low = entry->filter;
431
432 map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, ~low + 1);
433 if (unlikely(!map->ev_to_func)) abort();
434
435 for (entry = map->func_to_ev; entry->name; entry++) map->ev_to_func[~entry->filter] = entry;
436 }
437 break;
438
439 /*
440 * - Figure out the highest bit position
441 * - Allocate an array
442 * - Populate the array
443 */
445 {
446 uint8_t high = 0, pos;
448
449 for (entry = map->func_to_ev; entry->name; entry++) {
450 pos = fr_high_bit_pos(entry->fflags);
451 if (pos > high) high = pos;
452 }
453
454 map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, high);
455 if (unlikely(!map->ev_to_func)) abort();
456
457 for (entry = map->func_to_ev; entry->name; entry++) {
458 typeof_field(fr_event_func_map_entry_t, fflags) fflags = entry->fflags;
459
460 /*
461 * Multiple notes can be associated
462 * with the same function.
463 */
464 while ((pos = fr_high_bit_pos(fflags))) {
465 pos -= 1;
466 map->ev_to_func[pos] = entry;
467 /*
468 * Coverity thinks that after this decrement, pos
469 * can be 255 even though the loop condition precludes
470 * it. Adding a Coverity-only check won't change that,
471 * so we're stuck with annotation.
472 */
473 /* coverity[overflow_const] */
474 fflags &= ~(1 << pos);
475 }
476 }
477 }
478 break;
479 }
480}
481
482/** Figure out which function to call given a kevent
483 *
484 * This function should be called in a loop until it returns NULL.
485 *
486 * @param[in] ef File descriptor state handle.
487 * @param[in] filter from the kevent.
488 * @param[in,out] fflags from the kevent. Each call will return the function
489 * from the next most significant NOTE_*, with each
490 * NOTE_* before unset from fflags.
491 * @return
492 * - NULL there are no more callbacks to call.
493 * - The next callback to call.
494 */
495static inline CC_HINT(always_inline) fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
496{
497 fr_event_func_map_t const *map = ef->map;
498
499#define GET_FUNC(_ef, _offset) *((fr_event_fd_cb_t const *)((uint8_t const *)&(_ef)->active + _offset))
500
501 switch (map->idx_type) {
502 default:
503 fr_assert_fail("Invalid index type %u", map->idx_type);
504 return NULL;
505
507 {
508 int idx;
509
510 if (!*filter) return NULL;
511
512 idx = ~*filter; /* Consume the filter */
513 *filter = 0;
514
515 return GET_FUNC(ef, map->ev_to_func[idx]->offset);
516 }
517
519 {
520 int our_fflags = *fflags;
521 uint8_t pos = fr_high_bit_pos(our_fflags);
522
523 if (!pos) return NULL; /* No more fflags to consume */
524 pos -= 1; /* Saves an array element */
525
526 *fflags = our_fflags & ~(1 << pos); /* Consume the knote */
527
528 return GET_FUNC(ef, map->ev_to_func[pos]->offset);
529 }
530 }
531}
532
533/** Compare two file descriptor handles
534 *
535 * @param[in] one the first file descriptor handle.
536 * @param[in] two the second file descriptor handle.
537 * @return CMP(one, two)
538 */
539static int8_t fr_event_fd_cmp(void const *one, void const *two)
540{
541 fr_event_fd_t const *a = one, *b = two;
542
543 CMP_RETURN(a, b, fd);
544
545 return CMP(a->filter, b->filter);
546}
547
 548/** Return the number of file descriptors registered with this event loop
549 *
550 */
552{
553 if (unlikely(!el)) return -1;
554
555 return fr_rb_num_elements(el->fds);
556}
557
558/** Return the number of timer events currently scheduled
559 *
560 * @param[in] el to return timer events for.
561 * @return number of timer events.
562 */
564{
565 if (unlikely(!el)) return -1;
566
568}
569
570/** Return the kq associated with an event list.
571 *
572 * @param[in] el to return timer events for.
573 * @return kq
574 */
576{
577 if (unlikely(!el)) return -1;
578
579 return el->kq;
580}
581
582/** Get the current server time according to the event list
583 *
584 * If the event list is currently dispatching events, we return the time
585 * this iteration of the event list started.
586 *
587 * If the event list is not currently dispatching events, we return the
588 * current system time.
589 *
590 * @param[in] el to get time from.
591 * @return the current time according to the event list.
592 */
594{
595 return el->pub.tl->time();
596}
597
598/** Placeholder callback to avoid branches in service loop
599 *
600 * This is set in place of any NULL function pointers, so that the event loop doesn't
601 * SEGV if a filter callback function is unset between corral and service.
602 */
603static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
604{
605 return;
606}
607
608/** Build a new evset based on function pointers present
609 *
610 * @note The contents of active functions may be inconsistent if this function errors. But the
611 * only time that will occur is if the caller passed invalid arguments.
612 *
613 * @param[in] el we're building events for.
614 * @param[out] out_kev where to write the evset.
615 * @param[in] outlen length of output buffer.
616 * @param[out] active The set of function pointers with active filters.
617 * @param[in] ef event to insert.
618 * @param[in] new Functions to map to filters.
619 * @param[in] prev Previous set of functions mapped to filters.
620 * @return
621 * - >= 0 the number of changes written to out.
622 * - < 0 an error occurred.
623 */
625#ifndef WITH_EVENT_DEBUG
626 UNUSED
627#endif
629 struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active,
630 fr_event_fd_t *ef,
631 fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
632{
633 struct kevent *out = out_kev, *end = out + outlen;
634 fr_event_func_map_entry_t const *map;
635 struct kevent add[10], *add_p = add;
636 size_t i;
637
638 EVENT_DEBUG("%p - Building new evset for FD %i (new %p, prev %p)", el, ef->fd, new, prev);
639
640 /*
641 * Iterate over the function map, setting/unsetting
642 * filters and filter flags.
643 */
644 for (map = ef->map->func_to_ev; map->name; map++) {
645 bool has_current_func = false;
646 bool has_prev_func = false;
647 uint32_t current_fflags = 0;
648 uint32_t prev_fflags = 0;
649
650 do {
651 fr_event_fd_cb_t prev_func;
652 fr_event_fd_cb_t new_func;
653
654 /*
655 * If the previous value was the 'noop'
656 * callback, it's identical to being unset.
657 */
658 prev_func = *(fr_event_fd_cb_t const *)((uint8_t const *)prev + map->offset);
659 if (prev_func && (prev_func != fr_event_fd_noop)) {
660 EVENT_DEBUG("\t%s prev set (%p)", map->name, prev_func);
661 prev_fflags |= map->fflags;
662 has_prev_func = true;
663 } else {
664 EVENT_DEBUG("\t%s prev unset", map->name);
665 }
666
667 new_func = *(fr_event_fd_cb_t const *)((uint8_t const *)new + map->offset);
668 if (new_func && (new_func != fr_event_fd_noop)) {
669 EVENT_DEBUG("\t%s curr set (%p)", map->name, new_func);
670 current_fflags |= map->fflags;
671 has_current_func = true;
672
673 /*
674 * Check the filter will work for the
675 * type of file descriptor specified.
676 */
677 if (!(map->type & ef->type)) {
678 fr_strerror_printf("kevent %s (%s), can't be applied to fd of type %s",
679 map->name,
682 map->type, "<INVALID>"));
683 return -1;
684 }
685
686 /*
687 * Mark this filter function as active
688 */
689 memcpy((uint8_t *)active + map->offset, (uint8_t const *)new + map->offset,
690 sizeof(fr_event_fd_cb_t));
691 } else {
692 EVENT_DEBUG("\t%s curr unset", map->name);
693
694 /*
695 * Mark this filter function as inactive
696 * by setting it to the 'noop' callback.
697 */
698 *((fr_event_fd_cb_t *)((uint8_t *)active + map->offset)) = fr_event_fd_noop;
699 }
700
701 if (!(map + 1)->coalesce) break;
702 map++;
703 } while (1);
704
705 if (out > end) {
706 fr_strerror_const("Out of memory to store kevent filters");
707 return -1;
708 }
709
710 /*
711 * Upsert if we add a function or change the flags.
712 */
713 if (has_current_func &&
714 (!has_prev_func || (current_fflags != prev_fflags))) {
715 if ((size_t)(add_p - add) >= (NUM_ELEMENTS(add))) {
716 fr_strerror_const("Out of memory to store kevent EV_ADD filters");
717 return -1;
718 }
719 EVENT_DEBUG("\tEV_SET EV_ADD filter %s (%i), flags %i, fflags %i",
721 map->filter, map->flags, current_fflags);
722 EV_SET(add_p++, ef->fd, map->filter, map->flags, current_fflags, 0, ef);
723
724 /*
725 * Delete if we remove a function.
726 */
727 } else if (!has_current_func && has_prev_func) {
728 EVENT_DEBUG("\tEV_SET EV_DELETE filter %s (%i), flags %i, fflags %i",
730 map->filter, EV_DELETE, 0);
731 EV_SET(out++, ef->fd, map->filter, EV_DELETE, 0, 0, ef);
732 }
733 }
734
735 /*
736 * kevent is fine with adds/deletes in the same operation
737 * on the same file descriptor, but libkqueue doesn't do
738 * any kind of coalescing or ordering so you get an EEXIST
739 * error.
740 */
741 for (i = 0; i < (size_t)(add_p - add); i++) memcpy(out++, &add[i], sizeof(*out));
742
743 return out - out_kev;
744}
745
746/** Discover the type of a file descriptor
747 *
748 * This function writes the result of the discovery to the ef->type,
749 * and ef->sock_type fields.
750 *
751 * @param[out] ef to write type data to.
752 * @param[in] fd to discover the type of.
753 * @return
754 * - 0 on success.
755 * - -1 on failure.
756 */
757static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
758{
759 socklen_t opt_len = sizeof(ef->sock_type);
760
761 /*
762 * It's a socket or PCAP socket
763 */
764 if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &ef->sock_type, &opt_len) == 0) {
765#ifdef SO_GET_FILTER
766 opt_len = 0;
767 if (unlikely(getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, NULL, &opt_len) < 0)) {
768 fr_strerror_printf("Failed determining PF status: %s", fr_syserror(errno));
769 return -1;
770 }
771 if (opt_len) {
773 } else
774#endif
775 {
777 }
778
779 /*
780 * It's a file or directory
781 */
782 } else {
783 struct stat buf;
784
785 if (errno != ENOTSOCK) {
786 fr_strerror_printf("Failed retrieving socket type: %s", fr_syserror(errno));
787 return -1;
788 }
789
790 if (fstat(fd, &buf) < 0) {
791 fr_strerror_printf("Failed calling stat() on file: %s", fr_syserror(errno));
792 return -1;
793 }
794
795 if (S_ISDIR(buf.st_mode)) {
797 } else {
799 }
800 }
801 ef->fd = fd;
802
803 return 0;
804}
805
806/** Remove a file descriptor from the event loop and rbtree but don't explicitly free it
807 *
808 *
809 * @param[in] ef to remove.
810 * @return
811 * - 0 on success.
812 * - -1 on error;
813 */
815{
816 struct kevent evset[10];
817 int count = 0;
818 fr_event_list_t *el = ef->el;
819 fr_event_funcs_t funcs;
820
821 /*
822 * Already been removed from the various trees and
823 * the event loop.
824 */
825 if (ef->is_registered) {
826 memset(&funcs, 0, sizeof(funcs));
827
828 fr_assert(ef->armour == 0);
829
830 /*
831 * If this fails, it's a pretty catastrophic error.
832 */
833 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
834 &ef->active, ef, &funcs, &ef->active);
835 if (count > 0) {
836 int ret;
837
838 /*
839 * If this fails, assert on debug builds.
840 */
841 ret = kevent(el->kq, evset, count, NULL, 0, NULL);
842 if (!fr_cond_assert_msg(ret >= 0,
843 "FD %i was closed without being removed from the KQ: %s",
844 ef->fd, fr_syserror(errno))) {
845 return -1; /* Prevent the free, and leave the fd in the trees */
846 }
847 }
848
849 fr_rb_delete(el->fds, ef);
850 ef->is_registered = false;
851 }
852
853 /*
854 * Insert into the deferred free list, event will be
855 * freed later.
856 */
857 if (el->in_handler) {
858 /*
859 * Don't allow the same event to be
860 * inserted into the free list multiple
861 * times.
862 *
863 * This can happen if the same ef is
864 * delivered by multiple filters, i.e.
865 * if EVFILT_READ and EVFILT_WRITE
866 * were both high, and both handlers
867 * attempted to delete the event
868 * we'd need to prevent the event being
869 * inserted into the free list multiple
870 * times.
871 */
873 return -1; /* Will be freed later */
874 } else if (fr_dlist_entry_in_list(&ef->entry)) {
876 }
877
878 return 0;
879}
880
881/** Move a file descriptor event from one event list to another
882 *
883 * FIXME - Move suspended events too.
884 *
885 * @note Any pending events will not be transferred.
886 *
887 * @param[in] dst Event list to move file descriptor event to.
888 * @param[in] src Event list to move file descriptor from.
889 * @param[in] fd of the event to move.
890 * @param[in] filter of the event to move.
891 * @return
892 * - 0 on success.
893 * - -1 on failure. The event will remain active in the src list.
894 */
896 fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
897{
898 fr_event_fd_t *ef;
899 int ret;
900
901 if (fr_event_loop_exiting(dst)) {
902 fr_strerror_const("Destination event loop exiting");
903 return -1;
904 }
905
906 /*
907 * Ensure this exists
908 */
909 ef = fr_rb_find(src->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
910 if (unlikely(!ef)) {
911 fr_strerror_printf("No events are registered for fd %i", fd);
912 return -1;
913 }
914
916 ef->linked_ctx, NULL,
917 dst, ef->fd, ef->filter, &ef->active, ef->error, ef->uctx);
918 if (ret < 0) return -1;
919
920 (void)fr_event_fd_delete(src, ef->fd, ef->filter);
921
922 return ret;
923}
924
925
926/** Suspend/resume a subset of filters
927 *
928 * This function trades producing useful errors for speed.
929 *
930 * An example of suspending the read filter for an FD would be:
931 @code {.c}
932 static fr_event_update_t pause_read[] = {
933 FR_EVENT_SUSPEND(fr_event_io_func_t, read),
934 { 0 }
935 }
936
937 fr_event_filter_update(el, fd, FR_EVENT_FILTER_IO, pause_read);
938 @endcode
939 *
940 * @param[in] el to update descriptor in.
941 * @param[in] fd to update filters for.
942 * @param[in] filter The type of filter to update.
943 * @param[in] updates An array of updates to toggle filters on/off without removing
944 * the callback function.
945 */
947 fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
948{
949 fr_event_fd_t *ef;
950 size_t i;
951 fr_event_funcs_t curr_active, curr_stored;
952 struct kevent evset[10];
953 int count = 0;
954
955 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
956 if (unlikely(!ef)) {
957 fr_strerror_printf("No events are registered for fd %i", fd);
958 return -1;
959 }
960
961#ifndef NDEBUG
962 ef->file = file;
963 ef->line = line;
964#endif
965
966 /*
967 * Cheapest way of ensuring this function can error without
968 * leaving everything in an inconsistent state.
969 */
970 memcpy(&curr_active, &ef->active, sizeof(curr_active));
971 memcpy(&curr_stored, &ef->stored, sizeof(curr_stored));
972
973 /*
974 * Apply modifications to our copies of the active/stored array.
975 */
976 for (i = 0; updates[i].op; i++) {
977 switch (updates[i].op) {
978 default:
980 fr_assert(ef->armour == 0); /* can't suspect protected FDs */
981 memcpy((uint8_t *)&ef->stored + updates[i].offset,
982 (uint8_t *)&ef->active + updates[i].offset, sizeof(fr_event_fd_cb_t));
983 memset((uint8_t *)&ef->active + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
984 break;
985
987 memcpy((uint8_t *)&ef->active + updates[i].offset,
988 (uint8_t *)&ef->stored + updates[i].offset, sizeof(fr_event_fd_cb_t));
989 memset((uint8_t *)&ef->stored + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
990 break;
991 }
992 }
993
994 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset), &ef->active,
995 ef, &ef->active, &curr_active);
996 if (unlikely(count < 0)) {
997 error:
998 memcpy(&ef->active, &curr_active, sizeof(curr_active));
999 memcpy(&ef->stored, &curr_stored, sizeof(curr_stored));
1000 return -1;
1001 }
1002
1003 if (count && unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0)) {
1004 fr_strerror_printf("Failed updating filters for FD %i: %s", ef->fd, fr_syserror(errno));
1005 goto error;
1006 }
1007
1008 return 0;
1009}
1010
1011/** Insert a filter for the specified fd
1012 *
1013 * @param[in] ctx to bind lifetime of the event to.
1014 * @param[out] ef_out Previously allocated ef, or NULL.
1015 * @param[in] el to insert fd callback into.
1016 * @param[in] fd to install filters for.
1017 * @param[in] filter one of the #fr_event_filter_t values.
1018 * @param[in] funcs Structure containing callback functions. If a function pointer
1019 * is set, the equivalent kevent filter will be installed.
1020 * @param[in] error function to call when an error occurs on the fd.
1021 * @param[in] uctx to pass to handler.
1022 */
1024 TALLOC_CTX *ctx, fr_event_fd_t **ef_out,
1025 fr_event_list_t *el, int fd,
1026 fr_event_filter_t filter,
1027 void *funcs, fr_event_error_cb_t error,
1028 void *uctx)
1029{
1030 ssize_t count;
1031 fr_event_fd_t *ef;
1032 fr_event_funcs_t active;
1033 struct kevent evset[10];
1034
1035 if (unlikely(!el)) {
1036 fr_strerror_const("Invalid argument: NULL event list");
1037 return -1;
1038 }
1039
1040 if (unlikely(fd < 0)) {
1041 fr_strerror_printf("Invalid arguments: Bad FD %i", fd);
1042 return -1;
1043 }
1044
1045 if (unlikely(el->exit)) {
1046 fr_strerror_const("Event loop exiting");
1047 return -1;
1048 }
1049
1050 if (!ef_out || !*ef_out) {
1051 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1052 } else {
1053 ef = *ef_out;
1054 fr_assert((fd < 0) || (ef->fd == fd));
1055 }
1056
1057 /*
1058 * Need to free the event to change the talloc link.
1059 *
1060 * This is generally bad. If you hit this
1061 * code path you probably screwed up somewhere.
1062 */
1063 if (unlikely(ef && (ef->linked_ctx != ctx))) TALLOC_FREE(ef);
1064
1065 /*
1066 * No pre-existing event. Allocate an entry
1067 * for insertion into the rbtree.
1068 */
1069 if (!ef) {
1070 ef = talloc_zero(el, fr_event_fd_t);
1071 if (unlikely(!ef)) {
1072 fr_strerror_const("Out of memory");
1073 return -1;
1074 }
1075 talloc_set_destructor(ef, _event_fd_delete);
1076
1077 /*
1078 * Bind the lifetime of the event to the specified
1079 * talloc ctx. If the talloc ctx is freed, the
1080 * event will also be freed.
1081 */
1082 if (ctx != el) talloc_link_ctx(ctx, ef);
1083 ef->linked_ctx = ctx;
1084 ef->el = el;
1085
1086 /*
1087 * Determine what type of file descriptor
1088 * this is.
1089 */
1090 if (fr_event_fd_type_set(ef, fd) < 0) {
1091 free:
1092 talloc_free(ef);
1093 return -1;
1094 }
1095
1096 /*
1097 * Check the filter value is valid
1098 */
1099 if ((filter > (NUM_ELEMENTS(filter_maps) - 1))) {
1100 not_supported:
1101 fr_strerror_printf("Filter %u not supported", filter);
1102 goto free;
1103 }
1104 ef->map = &filter_maps[filter];
1105 if (ef->map->idx_type == FR_EVENT_FUNC_IDX_NONE) goto not_supported;
1106
1107 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
1108 &ef->active, ef, funcs, &ef->active);
1109 if (count < 0) goto free;
1110 if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
1111 fr_strerror_printf("Failed inserting filters for FD %i: %s", fd, fr_syserror(errno));
1112 goto free;
1113 }
1114
1115 ef->filter = filter;
1116 fr_rb_insert(el->fds, ef);
1117 ef->is_registered = true;
1118
1119 /*
1120 * Pre-existing event, update the filters and
1121 * functions associated with the file descriptor.
1122 */
1123 } else {
1124 fr_assert(ef->is_registered == true);
1125
1126 /*
1127 * Take a copy of the current set of active
1128 * functions, so we can error out in a
1129 * consistent state.
1130 */
1131 memcpy(&active, &ef->active, sizeof(ef->active));
1132
1133 fr_assert((ef->armour == 0) || ef->active.io.read);
1134
1135 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
1136 &ef->active, ef, funcs, &ef->active);
1137 if (count < 0) {
1138 error:
1139 memcpy(&ef->active, &active, sizeof(ef->active));
1140 return -1;
1141 }
1142 if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
1143 fr_strerror_printf("Failed modifying filters for FD %i: %s", fd, fr_syserror(errno));
1144 goto error;
1145 }
1146
1147 /*
1148 * Clear any previously suspended functions
1149 */
1150 memset(&ef->stored, 0, sizeof(ef->stored));
1151 }
1152
1153#ifndef NDEBUG
1154 ef->file = file;
1155 ef->line = line;
1156#endif
1157 ef->error = error;
1158 ef->uctx = uctx;
1159
1160 if (ef_out) *ef_out = ef;
1161
1162 return 0;
1163}
1164
1165/** Associate I/O callbacks with a file descriptor
1166 *
1167 * @param[in] ctx to bind lifetime of the event to.
1168 * @param[out] ef_out Where to store the output event
1169 * @param[in] el to insert fd callback into.
1170 * @param[in] fd to install filters for.
1171 * @param[in] read_fn function to call when fd is readable.
1172 * @param[in] write_fn function to call when fd is writable.
1173 * @param[in] error function to call when an error occurs on the fd.
1174 * @param[in] uctx to pass to handler.
1175 * @return
1176 * - 0 on success.
1177 * - -1 on failure.
1178 */
1180 TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd,
1181 fr_event_fd_cb_t read_fn,
1182 fr_event_fd_cb_t write_fn,
1183 fr_event_error_cb_t error,
1184 void *uctx)
1185{
1186 fr_event_io_func_t funcs = { .read = read_fn, .write = write_fn };
1187
1188 if (unlikely(!read_fn && !write_fn)) {
1189 fr_strerror_const("Invalid arguments: All callbacks are NULL");
1190 return -1;
1191 }
1192
1194 ctx, ef_out, el, fd, FR_EVENT_FILTER_IO, &funcs, error, uctx);
1195}
1196
1197/** Remove a file descriptor from the event loop
1198 *
1199 * @param[in] el to remove file descriptor from.
1200 * @param[in] fd to remove.
1201 * @param[in] filter The type of filter to remove.
1202 * @return
1203 * - 0 if file descriptor was removed.
1204 * - <0 on error.
1205 */
1207{
1208 fr_event_fd_t *ef;
1209
1210 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1211 if (unlikely(!ef)) {
1212 fr_strerror_printf("No events are registered for fd %d, filter %u", fd, filter);
1213 return -1;
1214 }
1215
1216 /*
1217 * Free will normally fail if it's
1218 * a deferred free. There is a special
1219 * case for kevent failures though.
1220 *
1221 * We distinguish between the two by
1222 * looking to see if the ef is still
1223 * in the even tree.
1224 *
1225 * Talloc returning -1 guarantees the
1226 * memory has not been freed.
1227 */
1228 if ((talloc_free(ef) == -1) && ef->is_registered) return -1;
1229
1230 return 0;
1231}
1232
1233/** Get the opaque event handle from a file descriptor
1234 *
1235 * @param[in] el to search for fd/filter in.
1236 * @param[in] fd to search for.
1237 * @param[in] filter to search for.
1238 * @return
1239 * - NULL if no event could be found.
1240 * - The opaque handle representing an fd event.
1241 */
1243{
1244 fr_event_fd_t *ef;
1245
1246 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1247 if (unlikely(!ef)) {
1248 fr_strerror_printf("No events are registered for fd %i", fd);
1249 return NULL;
1250 }
1251
1252 return ef;
1253}
1254
1255/** Returns the appropriate callback function for a given event
1256 *
1257 * @param[in] ef the event filter fd handle.
1258 * @param[in] kq_filter If the callbacks are indexed by filter.
1259 * @param[in] kq_fflags If the callbacks are indexed by NOTES (fflags).
1260 * @return
1261 * - NULL if no event it associated with the given ef/kq_filter or kq_fflags combo.
1262 * - The callback that would be called if an event with this filter/fflag combo was received.
1263 */
1264fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
1265{
1266 return event_fd_func(ef, &kq_filter, &kq_fflags);
1267}
1268
1269/** Returns the uctx associated with an fr_event_fd_t handle
1270 *
1271 */
1273{
1274 return ef->uctx;
1275}
1276
1277#ifndef NDEBUG
1278/** Armour an FD
1279 *
1280 * @param[in] el to remove file descriptor from.
1281 * @param[in] fd to remove.
1282 * @param[in] filter The type of filter to remove.
1283 * @param[in] armour The armour to add.
1284 * @return
1285 * - 0 if file descriptor was armoured
1286 * - <0 on error.
1287 */
1288int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1289{
1290 fr_event_fd_t *ef;
1291
1292 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1293 if (unlikely(!ef)) {
1294 fr_strerror_printf("No events are registered for fd %i", fd);
1295 return -1;
1296 }
1297
1298 if (ef->armour != 0) {
1299 fr_strerror_printf("FD %i is already armoured", fd);
1300 return -1;
1301 }
1302
1303 ef->armour = armour;
1304
1305 return 0;
1306}
1307
1308/** Unarmour an FD
1309 *
1310 * @param[in] el to remove file descriptor from.
1311 * @param[in] fd to remove.
1312 * @param[in] filter The type of filter to remove.
1313 * @param[in] armour The armour to remove
1314 * @return
1315 * - 0 if file descriptor was unarmoured
1316 * - <0 on error.
1317 */
1318int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1319{
1320 fr_event_fd_t *ef;
1321
1322 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1323 if (unlikely(!ef)) {
1324 fr_strerror_printf("No events are registered for fd %i", fd);
1325 return -1;
1326 }
1327
1328 fr_assert(ef->armour == armour);
1329
1330 ef->armour = 0;
1331 return 0;
1332}
1333#endif
1334
1335/** Remove PID wait event from kevent if the fr_event_pid_t is freed
1336 *
1337 * @param[in] ev to free.
1338 * @return 0
1339 */
1341{
1342 struct kevent evset;
1343
1344 if (ev->parent) *ev->parent = NULL;
1345 if (!ev->is_registered || (ev->pid < 0)) return 0; /* already deleted from kevent */
1346
1347 EVENT_DEBUG("%p - Disabling event for PID %u - %p was freed", ev->el, (unsigned int)ev->pid, ev);
1348
1349 EV_SET(&evset, ev->pid, EVFILT_PROC, EV_DELETE, NOTE_EXIT, 0, ev);
1350
1351 (void) kevent(ev->el->kq, &evset, 1, NULL, 0, NULL);
1352
1353 return 0;
1354}
1355
1356/** Evaluate a EVFILT_PROC event
1357 *
1358 */
1359CC_NO_UBSAN(function) /* UBSAN: false positive - Public/private version of fr_event_list_t trips -fsanitize=function */
1360static inline CC_HINT(always_inline)
1361void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
1362{
1363 pid_t pid;
1364 fr_event_pid_t *ev;
1365 fr_event_pid_cb_t callback;
1366 void *uctx;
1367
1368 EVENT_DEBUG("%p - PID %u exited with status %i",
1369 el, (unsigned int)kev->ident, (unsigned int)kev->data);
1370
1371 ev = talloc_get_type_abort((void *)kev->udata, fr_event_pid_t);
1372
1373 fr_assert(ev->pid == (pid_t) kev->ident);
1374 fr_assert((kev->fflags & NOTE_EXIT) != 0);
1375
1376 pid = ev->pid;
1377 callback = ev->callback;
1378 uctx = ev->uctx;
1379
1380 ev->is_registered = false; /* so we won't hit kevent again when it's freed */
1381
1382 /*
1383 * Delete the event before calling it.
1384 *
1385 * This also sets the parent pointer
1386 * to NULL, so the thing that started
1387 * monitoring the process knows the
1388 * handle is no longer valid.
1389 *
1390 * EVFILT_PROC NOTE_EXIT events are always
1391 * oneshot no matter what flags we pass,
1392 * so we're just reflecting the state of
1393 * the kqueue.
1394 */
1395 talloc_free(ev);
1396
1397 if (callback) callback(el, pid, (int) kev->data, uctx);
1398}
1399
1400/** Called on the next loop through the event loop when inserting an EVFILT_PROC event fails
1401 *
1402 * This is just a trampoleen function which takes the user event and simulates
1403 * an EVFILT_PROC event from it.
1404 *
1405 * @param[in] el That received the event.
1406 * @param[in] uctx An fr_event_pid_t to process.
1407 */
1409{
1410 fr_event_pid_t *ev = talloc_get_type_abort(uctx, fr_event_pid_t);
1411
1412 EVENT_DEBUG("%p - PID %ld exited early, triggered through user event", el, (long)ev->pid);
1413
1414 /*
1415 * Simulate a real struct kevent with the values we
1416 * recorded in fr_event_pid_wait.
1417 */
1418 event_pid_eval(el, &(struct kevent){ .ident = ev->pid, .data = ev->early_exit.status, .fflags = NOTE_EXIT, .udata = ev });
1419}
1420
1421/** Insert a PID event into an event list
1422 *
1423 * @note The talloc parent of the memory returned in ev_p must not be changed.
1424 * If the lifetime of the event needs to be bound to another context
1425 * this function should be called with the existing event pointed to by
1426 * ev_p.
1427 *
1428 * @param[in] ctx to bind lifetime of the event to.
1429 * @param[in] el to insert event into.
1430 * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
1431 * in a temporal sense, not in a memory structure or dependency sense.
1432 * @param[in] pid child PID to wait for
1433 * @param[in] callback function to execute if the event fires.
1434 * @param[in] uctx user data to pass to the event.
1435 * @return
1436 * - 0 on success.
1437 * - -1 on failure.
1438 */
1440 TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p,
1441 pid_t pid, fr_event_pid_cb_t callback, void *uctx)
1442{
1443 fr_event_pid_t *ev;
1444 struct kevent evset;
1445
1446 ev = talloc(ctx, fr_event_pid_t);
1447 if (unlikely(ev == NULL)) {
1448 fr_strerror_const("Out of memory");
1449 return -1;
1450 }
1451 *ev = (fr_event_pid_t) {
1452 .el = el,
1453 .pid = pid,
1454 .callback = callback,
1455 .uctx = uctx,
1456 .parent = ev_p,
1457#ifndef NDEBUG
1458 .file = file,
1459 .line = line,
1460#endif
1461 };
1462 talloc_set_destructor(ev, _event_pid_free);
1463
1464 /*
1465 * macOS only, on FreeBSD NOTE_EXIT always provides
1466 * the status anyway.
1467 */
1468#ifndef NOTE_EXITSTATUS
1469#define NOTE_EXITSTATUS (0)
1470#endif
1471
1472 EVENT_DEBUG("%p - Adding exit waiter for PID %u", el, (unsigned int)pid);
1473
1474 EV_SET(&evset, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT | NOTE_EXITSTATUS, 0, ev);
1475 ev->is_registered = true;
1476
1477 /*
1478 * This deals with the race where the process exited
1479 * before we could add it to the kqueue.
1480 *
1481 * Unless our caller is broken, the process should
1482 * still be available for reaping, so we check
1483 * waitid to see if there is a pending process and
1484 * then call the callback as kqueue would have done.
1485 */
1486 if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1487 siginfo_t info;
1488 int ret;
1489
1490 /*
1491 * Ensure we don't accidentally pick up the error
1492 * from kevent.
1493 */
1495
1496 ev->is_registered = false;
1497
1498 /*
1499 * If the child exited before kevent() was
1500 * called, we need to get its status via
1501 * waitid().
1502 *
1503 * We don't reap the process here to emulate
1504 * what kqueue does (notify but not reap).
1505 *
1506 * waitid returns >0 on success, 0 if the
1507 * process is still running, and -1 on failure.
1508 *
1509 * If we get a 0, then that's extremely strange
1510 * as adding the kevent failed for a reason
1511 * other than the process already having exited.
1512 *
1513 * On Linux waitid will always return 1 to
1514 * indicate the process exited.
1515 *
1516 * On macOS we seem to get a mix of 1 or 0,
1517 * even if the si_code is one of the values
1518 * we'd consider to indicate that the process
1519 * had completed.
1520 */
1521 ret = waitid(P_PID, pid, &info, WEXITED | WNOHANG | WNOWAIT);
1522 if (ret > 0) {
1523 static fr_table_num_sorted_t const si_codes[] = {
1524 { L("exited"), CLD_EXITED },
1525 { L("killed"), CLD_KILLED },
1526 { L("dumped"), CLD_DUMPED },
1527 { L("trapped"), CLD_TRAPPED },
1528 { L("stopped"), CLD_STOPPED },
1529 { L("continued"), CLD_CONTINUED }
1530 };
1531 static size_t si_codes_len = NUM_ELEMENTS(si_codes);
1532
1533 switch (info.si_code) {
1534 case CLD_EXITED:
1535 case CLD_KILLED:
1536 case CLD_DUMPED:
1537 EVENT_DEBUG("%p - PID %ld early exit - code %s (%d), status %d",
1538 el, (long)pid, fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
1539 info.si_code, info.si_status);
1540
1541 /*
1542 * Record the status for later
1543 */
1544 ev->early_exit.status = info.si_status;
1545
1546 /*
1547 * The user event acts as a surrogate for
1548 * an EVFILT_PROC event, and will be evaluated
1549 * during the next loop through the event loop.
1550 *
1551 * It will be automatically deleted when the
1552 * fr_event_pid_t is freed.
1553 *
1554 * Previously we tried to evaluate the proc
1555 * callback here directly, but this lead to
1556 * multiple problems, the biggest being that
1557 * setting requests back to resumable failed
1558 * because they were not yet yielded,
1559 * leading to hangs.
1560 */
1561 early_exit:
1562 if (fr_event_user_insert(ev, el, &ev->early_exit.ev, true, _fr_event_pid_early_exit, ev) < 0) {
1563 fr_strerror_printf_push("Failed adding wait for PID %ld, and failed adding "
1564 "backup user event", (long) pid);
1565 error:
1566 talloc_free(ev);
1567 return -1;
1568 }
1569 break;
1570
1571 default:
1572 fr_strerror_printf("Unexpected code %s (%d) whilst waiting on PID %ld",
1573 fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
1574 info.si_code, (long) pid);
1575
1576 goto error;
1577 }
1578 /*
1579 * Failed adding waiter for process, but process has not completed...
1580 *
1581 * This weird, but seems to happen on macOS occasionally.
1582 *
1583 * Add an event to run early exit...
1584 *
1585 * Man pages for waitid say if it returns 0 the info struct can be in
1586 * a nondeterministic state, so there's nothing more to do.
1587 */
1588 } else if (ret == 0) {
1589 goto early_exit;
1590 } else {
1591 /*
1592 * Print this error here, so that the caller gets
1593 * the error from kevent(), and not waitpid().
1594 */
1595 fr_strerror_printf("Failed adding waiter for PID %ld - kevent %s, waitid %s",
1596 (long) pid, fr_syserror(evset.flags), fr_syserror(errno));
1597
1598 goto error;
1599 }
1600 }
1601
1602 /*
1603 * Sometimes the caller doesn't care about getting the
1604 * PID. But we still want to clean it up.
1605 */
1606 if (ev_p) *ev_p = ev;
1607
1608 return 0;
1609}
1610
1611/** Saves some boilerplate...
1612 *
1613 */
1614static inline CC_HINT(always_inline)
1615void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
1616{
1617 if (reap->callback) reap->callback(reap->el, pid, status, reap->uctx);
1618}
1619
1620/** Does the actual reaping of PIDs
1621 *
1622 */
1623static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
1624{
1625 fr_event_pid_reap_t *reap = talloc_get_type_abort(uctx, fr_event_pid_reap_t);
1626
1627 waitpid(pid, &status, WNOHANG); /* Don't block the process if there's a logic error somewhere */
1628
1629 EVENT_DEBUG("%s - Reaper reaped PID %u, status %u - %p", __FUNCTION__, pid, status, reap);
1630
1631 event_list_reap_run_callback(reap, pid, status);
1632
1633 talloc_free(reap);
1634}
1635
1637{
1638 /*
1639 * Clear out the entry in the pid_to_reap
1640 * list if the event was inserted.
1641 */
1642 if (fr_dlist_entry_in_list(&reap->entry)) {
1643 EVENT_DEBUG("%s - Removing entry from pid_to_reap %i - %p", __FUNCTION__,
1644 reap->pid_ev ? reap->pid_ev->pid : -1, reap);
1645 fr_dlist_remove(&reap->el->pid_to_reap, reap);
1646 }
1647
1648 return 0;
1649}
1650
1651/** Asynchronously wait for a PID to exit, then reap it
1652 *
1653 * This is intended to be used when we no longer care about a process
1654 * exiting, but we still want to clean up its state so we don't have
1655 * zombie processes sticking around.
1656 *
1657 * @param[in] el to use to reap the process.
1658 * @param[in] pid to reap.
1659 * @param[in] callback to call when the process is reaped.
1660 * May be NULL.
1661 * @param[in] uctx to pass to callback.
1662 * @return
1663 * - -1 if we couldn't find the process or it has already exited/been reaped.
1664 * - 0 on success (we setup a process handler).
1665 */
/* NOTE(review): the declarator (orig line 1666) was dropped by the doc
 * extraction.  The @param list implies fr_event_pid_reap(el, pid, callback,
 * uctx) returning int — confirm against upstream event.h. */
1667{
1668 int ret;
1669 fr_event_pid_reap_t *reap;
1670
1671 reap = talloc_zero(NULL, fr_event_pid_reap_t);
1672 if (unlikely(!reap)) {
1673 fr_strerror_const("Out of memory");
1674 return -1;
1675 }
1676 talloc_set_destructor(reap, _fr_event_reap_free);
/* NOTE(review): orig line 1677 (the call assigning ret — presumably the
 * fr_event_pid_wait() registration using _fr_event_pid_reap_cb as the
 * callback) was dropped by the doc extraction.  As rendered, ret is read
 * uninitialised below; restore the missing line from upstream. */
1678
1679 if (ret < 0) {
1680 talloc_free(reap);
1681 return ret;
1682 }
1683
1684 reap->el = el;
1685 reap->callback = callback;
1686 reap->uctx = uctx;
1687
1688 EVENT_DEBUG("%s - Adding reaper for PID %u - %p", __FUNCTION__, pid, reap);
1689
/* NOTE(review): orig line 1690 (presumably the insertion of reap into
 * el->pid_to_reap, which _fr_event_reap_free later removes it from) was
 * dropped by the doc extraction — restore from upstream. */
1691
1692 return ret;
1693}
1694
1695/** Send a signal to all the processes we have in our reap list, and reap them
1696 *
1697 * @param[in] el containing the processes to reap.
1698 * @param[in] timeout how long to wait before we signal the processes.
1699 * @param[in] signal to send to processes. Should be a fatal signal.
1700 * @return The number of processes reaped.
1701 */
/* NOTE(review): the declarator (orig line 1702) and several interior lines
 * were dropped by the doc extraction.  The @param list implies
 * fr_event_pid_reap_signal(el, timeout, signal) — confirm against upstream. */
1703{
/* NOTE(review): orig line 1704 (likely the initialisation of the count that
 * is returned at the end) was dropped by the doc extraction. */
1705 fr_event_pid_reap_t *reap = NULL;
1706
1707 /*
1708 * If we've got a timeout, our best option
1709 * is to use a kqueue instance to monitor
1710 * for process exit.
1711 */
/* NOTE(review): orig line 1712 (the conditional opening this timeout block,
 * closed by the '}' at orig 1792) was dropped by the doc extraction. */
1713 int status;
1714 struct kevent evset;
1715 int waiting = 0;
1716 int kq = kqueue();
1717 fr_time_t now, start = el->pub.tl->time(), end = fr_time_add(start, timeout);
1718
1719 if (unlikely(kq < 0)) goto force;
1720
/* NOTE(review): orig line 1721 (the loop header iterating el->pid_to_reap
 * with iterator 'i' — explains the double '}}' at orig 1754) was dropped by
 * the doc extraction. */
1722 if (!i->pid_ev) {
1723 EVENT_DEBUG("%p - %s - Reaper already called (logic error)... - %p",
1724 el, __FUNCTION__, i);
1725
1726 event_list_reap_run_callback(i, -1, SIGKILL);
1727 talloc_free(i);
1728 continue;
1729 }
1730
1731 /*
1732 * See if any processes have exited already
1733 */
1734 if (waitpid(i->pid_ev->pid, &status, WNOHANG) == i->pid_ev->pid) { /* reap */
1735 EVENT_DEBUG("%p - %s - Reaper PID %u already exited - %p",
1736 el, __FUNCTION__, i->pid_ev->pid, i);
1737 event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
1738 talloc_free(i);
1739 continue;
1740 }
1741
1742 /*
1743 * Add the rest to a temporary event loop
1744 */
1745 EV_SET(&evset, i->pid_ev->pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, i);
1746 if (kevent(kq, &evset, 1, NULL, 0, NULL) < 0) {
1747 EVENT_DEBUG("%p - %s - Failed adding reaper PID %u to tmp event loop - %p",
1748 el, __FUNCTION__, i->pid_ev->pid, i);
1749 event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
1750 talloc_free(i);
1751 continue;
1752 }
1753 waiting++;
1754 }}
1755
1756 /*
1757 * Keep draining process exits as they come in...
1758 */
1759 while ((waiting > 0) && fr_time_gt(end, (now = el->pub.tl->time()))) {
1760 struct kevent kev;
1761 int ret;
1762
1763 ret = kevent(kq, NULL, 0, &kev, 1, &fr_time_delta_to_timespec(fr_time_sub(end, now)));
1764 switch (ret) {
1765 default:
1766 EVENT_DEBUG("%p - %s - Reaper tmp loop error %s, forcing process reaping",
1767 el, __FUNCTION__, fr_syserror(errno));
1768 close(kq);
1769 goto force;
1770
1771 case 0:
1772 EVENT_DEBUG("%p - %s - Reaper timeout waiting for process exit, forcing process reaping",
1773 el, __FUNCTION__);
1774 close(kq);
1775 goto force;
1776
1777 case 1:
1778 reap = talloc_get_type_abort(kev.udata, fr_event_pid_reap_t);
1779
1780 EVENT_DEBUG("%p - %s - Reaper reaped PID %u, status %u - %p",
1781 el, __FUNCTION__, (unsigned int)kev.ident, (unsigned int)kev.data, reap);
1782 waitpid(reap->pid_ev->pid, &status, WNOHANG); /* reap */
1783
1784 event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
1785 talloc_free(reap);
1786 break;
1787 }
1788 waiting--;
1789 }
1790
1791 close(kq);
1792 }
1793
1794force:
1795 /*
1796 * Deal with any lingering reap requests
1797 */
1798 while ((reap = fr_dlist_head(&el->pid_to_reap))) {
1799 int status;
1800
1801 EVENT_DEBUG("%s - Reaper forcefully reaping PID %u - %p", __FUNCTION__, reap->pid_ev->pid, reap);
1802
1803 if (kill(reap->pid_ev->pid, signal) < 0) {
1804 /*
1805 * Make sure we don't hang if the
1806 * process has actually exited.
1807 *
1808 * We could check for ESRCH but it's
1809 * not clear if that'd be returned
1810 * for a PID in the unreaped state
1811 * or not...
1812 */
1813 waitpid(reap->pid_ev->pid, &status, WNOHANG);
1814 event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
1815 talloc_free(reap);
1816 continue;
1817 }
1818
1819 /*
1820 * Wait until the child process exits
1821 */
1822 waitpid(reap->pid_ev->pid, &status, 0);
1823 event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
/* NOTE(review): orig line 1824 (presumably the talloc_free(reap) that makes
 * this loop terminate) was dropped by the doc extraction. */
1825 }
1826
/* NOTE(review): orig line 1827 (the return statement — per the doc header it
 * returns the number of processes reaped) was dropped by the doc extraction. */
1828}
1829
1830/** Memory will not be freed if we fail to remove the event from the kqueue
1831 *
1832 * It's easier to debug memory leaks with modern tooling than it is
1833 * to determine why we get random failures and event leaks inside of kqueue.
1834 *
1835 * @return
1836 * - 0 on success.
1837 * - -1 on failure.
1838 */
1840{
1841 if (ev->is_registered) {
1842 struct kevent evset;
1843
1844 EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_DELETE, 0, 0, 0);
1845
1846 if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1847 fr_strerror_printf("Failed removing user event - kevent %s", fr_syserror(evset.flags));
1848 return -1;
1849 }
1850 ev->is_registered = false;
1851 }
1852
1853 return 0;
1854}
1855
1856static inline CC_HINT(always_inline)
1857void event_user_eval(fr_event_list_t *el, struct kevent *kev)
1858{
1859 fr_event_user_t *ev;
1860
1861 /*
1862 * This is just a "wakeup" event, which
1863 * is always ignored.
1864 */
1865 if (kev->ident == 0) return;
1866
1867 ev = talloc_get_type_abort((void *)kev->ident, fr_event_user_t);
1868 fr_assert((uintptr_t)ev == kev->ident);
1869
1870 ev->callback(el, ev->uctx);
1871}
1872
1873/** Add a user callback to the event list.
1874 *
1875 * @param[in] ctx to allocate the event in.
1876 * @param[in] el Containing the timer events.
1877 * @param[out] ev_p Where to write a pointer.
1878 * @param[in] trigger Whether the user event is triggered initially.
1879 * @param[in] callback for EVFILT_USER.
1880 * @param[in] uctx for the callback.
1881 * @return
1882 * - 0 on success.
1883 * - -1 on error.
1884 */
1886 TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p,
1887 bool trigger, fr_event_user_cb_t callback, void *uctx)
1888{
1889 fr_event_user_t *ev;
1890 struct kevent evset;
1891
1892 ev = talloc(ctx, fr_event_user_t);
1893 if (unlikely(ev == NULL)) {
1894 fr_strerror_const("Out of memory");
1895 return -1;
1896 }
1897 *ev = (fr_event_user_t) {
1898 .el = el,
1899 .callback = callback,
1900 .uctx = uctx,
1901#ifndef NDEBUG
1902 .file = file,
1903 .line = line,
1904#endif
1905 };
1906
1907 EV_SET(&evset, (uintptr_t)ev,
1908 EVFILT_USER, EV_ADD | EV_DISPATCH, (trigger * NOTE_TRIGGER), 0, ev);
1909
1910 if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1911 fr_strerror_printf("Failed adding user event - kevent %s", fr_syserror(evset.flags));
1912 talloc_free(ev);
1913 return -1;
1914 }
1915 ev->is_registered = true;
1916 talloc_set_destructor(ev, _event_user_delete);
1917
1918 if (ev_p) *ev_p = ev;
1919
1920 return 0;
1921}
1922
1923/** Trigger a user event
1924 *
1925 * @param[in] el containing the user event.
1926 * @param[in] ev Handle for the user event.
1927 * @return
1928 * - 0 on success.
1929 * - -1 on error.
1930 */
1932{
1933 struct kevent evset;
1934
1935 EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_ENABLE, NOTE_TRIGGER, 0, NULL);
1936
1937 if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1938 fr_strerror_printf("Failed triggering user event - kevent %s", fr_syserror(evset.flags));
1939 return -1;
1940 }
1941
1942 return 0;
1943}
1944
1945/** Add a pre-event callback to the event list.
1946 *
1947 * Events are serviced in insert order. i.e. insert A, B, we then
1948 * have A running before B.
1949 *
1950 * @param[in] el Containing the timer events.
1951 * @param[in] callback The pre-processing callback.
1952 * @param[in] uctx for the callback.
1953 * @return
1954 * - < 0 on error
1955 * - 0 on success
1956 */
/* NOTE(review): the declarator (orig line 1957) was dropped by the doc
 * extraction; the @param list implies (el, callback, uctx).  Confirm against
 * upstream event.h. */
1958{
1959 fr_event_pre_t *pre;
1960
/* NOTE(review): the talloc() result is used without a NULL check — a failed
 * allocation would crash on the deref below.  Confirm this is intentional. */
1961 pre = talloc(el, fr_event_pre_t);
1962 pre->callback = callback;
1963 pre->uctx = uctx;
1964
/* NOTE(review): orig line 1965 (presumably insertion into el->pre_callbacks,
 * which fr_event_corral iterates) was dropped by the doc extraction. */
1966
1967 return 0;
1968}
1969
1970/** Delete a pre-event callback from the event list.
1971 *
1972 * @param[in] el Containing the timer events.
1973 * @param[in] callback The pre-processing callback.
1974 * @param[in] uctx for the callback.
1975 * @return
1976 * - < 0 on error
1977 * - 0 on success
1978 */
/* NOTE(review): the declarator (orig line 1979) was dropped by the doc
 * extraction; the @param list implies (el, callback, uctx). */
1980{
1981 fr_event_pre_t *pre, *next;
1982
1983 for (pre = fr_dlist_head(&el->pre_callbacks);
1984 pre != NULL;
1985 pre = next) {
1986 next = fr_dlist_next(&el->pre_callbacks, pre);
1987
1988 if ((pre->callback == callback) &&
1989 (pre->uctx == uctx)) {
/* NOTE(review): orig line 1990 (presumably removal from el->pre_callbacks
 * before the free) was dropped by the doc extraction. */
1991 talloc_free(pre);
1992 return 0;
1993 }
1994 }
1995
1996 return -1;
1997}
1998
1999/** Add a post-event callback to the event list.
2000 *
2001 * Events are serviced in insert order. i.e. insert A, B, we then
2002 * have A running before B.
2003 *
2004 * @param[in] el Containing the timer events.
2005 * @param[in] callback The post-processing callback.
2006 * @param[in] uctx for the callback.
2007 * @return
2008 * - < 0 on error
2009 * - 0 on success
2010 */
/* NOTE(review): the declarator (orig line 2011) was dropped by the doc
 * extraction; the @param list implies (el, callback, uctx). */
2012{
2013 fr_event_post_t *post;
2014
/* NOTE(review): talloc() result used without a NULL check — same pattern as
 * fr_event_pre_insert; confirm intentional. */
2015 post = talloc(el, fr_event_post_t);
2016 post->callback = callback;
2017 post->uctx = uctx;
2018
/* NOTE(review): orig line 2019 (presumably insertion into el->post_callbacks)
 * was dropped by the doc extraction. */
2020
2021 return 0;
2022}
2023
2024/** Delete a post-event callback from the event list.
2025 *
2026 * @param[in] el Containing the timer events.
2027 * @param[in] callback The post-processing callback.
2028 * @param[in] uctx for the callback.
2029 * @return
2030 * - < 0 on error
2031 * - 0 on success
2032 */
/* NOTE(review): the declarator (orig line 2033) was dropped by the doc
 * extraction; the @param list implies (el, callback, uctx). */
2034{
2035 fr_event_post_t *post, *next;
2036
2037 for (post = fr_dlist_head(&el->post_callbacks);
2038 post != NULL;
2039 post = next) {
2040 next = fr_dlist_next(&el->post_callbacks, post);
2041
2042 if ((post->callback == callback) &&
2043 (post->uctx == uctx)) {
/* NOTE(review): orig line 2044 (presumably removal from el->post_callbacks
 * before the free) was dropped by the doc extraction. */
2045 talloc_free(post);
2046 return 0;
2047 }
2048 }
2049
2050 return -1;
2051}
2052
2053/** Gather outstanding timer and file descriptor events
2054 *
2055 * @param[in] el to process events for.
2056 * @param[in] now The current time.
2057 * @param[in] wait if true, block on the kevent() call until a timer or file descriptor event occurs.
2058 * @return
2059 * - <0 error, or the event loop is exiting
2060 * - the number of outstanding I/O events, +1 if at least one timer will fire.
2061 */
2063{
2064 fr_time_delta_t when, *wake;
2065 struct timespec ts_when, *ts_wake;
2066 fr_event_pre_t *pre;
2067 int num_fd_events;
2068 bool timer_event_ready = false;
2069 fr_time_t next;
2070
2071 el->num_fd_events = 0;
2072
2073 if (el->will_exit || el->exit) {
2074 el->exit = el->will_exit;
2075
2076 fr_strerror_const("Event loop exiting");
2077 return -1;
2078 }
2079
2080 /*
2081 * By default we wait for 0ns, which means returning
2082 * immediately from kevent().
2083 */
2084 when = fr_time_delta_wrap(0);
2085 wake = &when;
2086
2087 /*
2088 * See when we have to wake up. Either now, if the timer
2089 * events are in the past. Or, we wait for a future
2090 * timer event.
2091 */
2092 next = fr_timer_list_when(el->pub.tl);
2093 if (fr_time_neq(next, fr_time_wrap(0))) {
2094 if (fr_time_lteq(next, now)) {
2095 timer_event_ready = true;
2096
2097 } else if (wait) {
2098 when = fr_time_sub(next, now);
2099
2100 } /* else we're not waiting, leave "when == 0" */
2101
2102 } else if (wait) {
2103 /*
2104 * We're asked to wait, but there's no timer
2105 * event. We can then sleep forever.
2106 */
2107 wake = NULL;
2108 }
2109
2110 /*
2111 * Run the status callbacks. It may tell us that the
2112 * application has more work to do, in which case we
2113 * re-set the timeout to be instant.
2114 *
2115 * We only run these callbacks if the caller is otherwise
2116 * idle.
2117 */
2118 if (wait) {
2119 for (pre = fr_dlist_head(&el->pre_callbacks);
2120 pre != NULL;
2121 pre = fr_dlist_next(&el->pre_callbacks, pre)) {
2122 if (pre->callback(now, wake ? *wake : fr_time_delta_wrap(0), pre->uctx) > 0) {
2123 wake = &when;
2124 when = fr_time_delta_wrap(0);
2125 }
2126 }
2127 }
2128
2129 /*
2130 * Wake is the delta between el->now
2131 * (the event loops view of the current time)
2132 * and when the event should occur.
2133 */
2134 if (wake) {
2135 ts_when = fr_time_delta_to_timespec(when);
2136 ts_wake = &ts_when;
2137 } else {
2138 ts_wake = NULL;
2139 }
2140
2141 /*
2142 * Populate el->events with the list of I/O events
2143 * that occurred since this function was last called
2144 * or wait for the next timer event.
2145 */
2146 num_fd_events = kevent(el->kq, NULL, 0, el->events, FR_EV_BATCH_FDS, ts_wake);
2147
2148 /*
2149 * Interrupt is different from timeout / FD events.
2150 */
2151 if (unlikely(num_fd_events < 0)) {
2152 if (errno == EINTR) {
2153 return 0;
2154 } else {
2155 fr_strerror_printf("Failed calling kevent: %s", fr_syserror(errno));
2156 return -1;
2157 }
2158 }
2159
2160 el->num_fd_events = num_fd_events;
2161
2162 EVENT_DEBUG("%p - %s - kevent returned %u FD events", el, __FUNCTION__, el->num_fd_events);
2163
2164 /*
2165 * If there are no FD events, we must have woken up from a timer
2166 */
2167 if (!num_fd_events) {
2168 if (wait) timer_event_ready = true;
2169 }
2170 /*
2171 * The caller doesn't really care what the value of the
2172 * return code is. Just that it's greater than zero if
2173 * events needs servicing.
2174 *
2175 * num_fd_events > 0 - if kevent() returns FD events
2176 * timer_event_ready > 0 - if there were timers ready BEFORE or AFTER calling kevent()
2177 */
2178 return num_fd_events + timer_event_ready;
2179}
2180
2181CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private fr_event_list_t trips --fsanitize=function*/
2182static inline CC_HINT(always_inline)
2183void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
2184{
2185 fr_event_fd_cb_t fd_cb;
2186
2187 while ((fd_cb = event_fd_func(ef, filter, fflags))) {
2188 fd_cb(el, ef->fd, flags, ef->uctx);
2189 }
2190}
2191
2192/** Service any outstanding timer or file descriptor events
2193 *
2194 * @param[in] el containing events to service.
2195 */
2196CC_NO_UBSAN(function) /* UBSAN: false positive - Public/private version of fr_event_list_t trips -fsanitize=function */
/* NOTE(review): line 2197, "void fr_event_service(fr_event_list_t *el)", is elided in this rendering */
2198{
2199 fr_timer_list_t *etl = el->pub.tl;
2200 int i;
2201 fr_event_post_t *post;
2202 fr_time_t when, now;
2203
2204 if (unlikely(el->exit)) return;
2205
2206 EVENT_DEBUG("%p - %s - Servicing %u FD events", el, __FUNCTION__, el->num_fd_events);
2207
2208 /*
2209 * Run all of the file descriptor events.
2210 */
 /*
  * While in_handler is set, fr_event_fd_t handles can't be freed directly;
  * deletions are deferred to the fd_to_free list which is drained after
  * this loop (see below), so stale kevent entries can be skipped safely.
  */
2211 el->in_handler = true;
2212 for (i = 0; i < el->num_fd_events; i++) {
2213 /*
2214 * Process any user events
2215 */
2216 switch (el->events[i].filter) {
2217 case EVFILT_USER:
2218 event_user_eval(el, &el->events[i]);
2219 continue;
2220
2221 /*
2222 * Process proc events
2223 */
2224 case EVFILT_PROC:
2225 event_pid_eval(el, &el->events[i]);
2226 continue;
2227
2228 /*
2229 * Process various types of file descriptor events
2230 */
2231 default:
2232 {
2233 fr_event_fd_t *ef = talloc_get_type_abort(el->events[i].udata, fr_event_fd_t);
2234 int fd_errno = 0;
2235
2236 int fflags = el->events[i].fflags; /* mutable */
2237 int filter = el->events[i].filter;
2238 int flags = el->events[i].flags;
2239
2240 if (!ef->is_registered) continue; /* Was deleted between corral and service */
2241
2242 if (unlikely(flags & EV_ERROR)) {
2243 fd_errno = el->events[i].data;
2244 ev_error:
2245 /*
2246 * Call the error handler, but only if the socket hasn't been deleted at EOF
2247 * below.
2248 */
2249 if (ef->is_registered && ef->error) ef->error(el, ef->fd, flags, fd_errno, ef->uctx);
2250 TALLOC_FREE(ef);
2251 continue;
2252 }
2253
2254 /*
2255 * EOF can indicate we've actually reached
2256 * the end of a file, but for sockets it usually
2257 * indicates the other end of the connection
2258 * has gone away.
2259 */
2260 if (flags & EV_EOF) {
2261 /*
2262 * This is fine, the callback will get notified
2263 * via the flags field.
2264 */
2265 if (ef->type == FR_EVENT_FD_FILE) goto service;
2266#if defined(__linux__) && defined(SO_GET_FILTER)
2267 /*
2268 * There seems to be an issue with the
2269 * ioctl(...SIOCNQ...) call libkqueue
2270 * uses to determine the number of bytes
2271 * readable. When ioctl returns, the number
2272 * of bytes available is set to zero, which
2273 * libkqueue interprets as EOF.
2274 *
2275 * As a workaround, if we're not reading
2276 * a file, and are operating on a raw socket
2277 * with a packet filter attached, we ignore
2278 * the EOF flag and continue.
2279 */
2280 if ((ef->sock_type == SOCK_RAW) && (ef->type == FR_EVENT_FD_PCAP)) goto service;
2281#endif
2282
2283 /*
2284 * If we see an EV_EOF flag that means the
2285 * read side of the socket has been closed
2286 * but there may still be pending data.
2287 *
2288 * Dispatch the read event and then error.
2289 */
2290 if ((el->events[i].filter == EVFILT_READ) && (el->events[i].data > 0)) {
2291 event_callback(el, ef, &filter, flags, &fflags);
2292 }
2293
2294 fd_errno = el->events[i].fflags;
2295
2296 goto ev_error;
2297 }
2298
2299 service:
2300#ifndef NDEBUG
2301 EVENT_DEBUG("Running event for fd %d, from %s[%d]", ef->fd, ef->file, ef->line);
2302#endif
2303
2304 /*
2305 * Service the event_fd events
2306 */
2307 event_callback(el, ef, &filter, flags, &fflags);
2308 }
2309 }
2310 }
2311
2312 /*
2313 * Process any deferred frees performed
2314 * by the I/O handlers.
2315 *
2316 * The events are removed from the FD rbtree
2317 * and kevent immediately, but frees are
2318 * deferred to allow stale events to be
2319 * skipped sans SEGV.
2320 */
2321 el->in_handler = false; /* Allow events to be deleted */
2322 {
2323 fr_event_fd_t *ef;
2324
2325 while ((ef = fr_dlist_head(&el->fd_to_free))) talloc_free(ef);
2326 }
2327
2328 /*
2329 * We must call el->time() again here, else the event
2330 * list's time gets updated too infrequently, and we
2331 * can end up with a situation where timers are
2332 * serviced much later than they should be, which can
2333 * cause strange interaction effects, spurious calls
2334 * to kevent, and busy loops.
2335 */
2336 now = etl->time();
2337
2338 /*
2339 * Run all of the timer events. Note that these can add
2340 * new timers!
2341 */
 /*
  * NOTE(review): line 2342 (the opening of the scope closed at line 2354,
  * presumably a guard such as a fr_timer_list_num_events() check) is
  * elided in this rendering -- confirm against the original source.
  */
2343 int ret;
2344
2345 when = now;
2346
2347 ret = fr_timer_list_run(etl, &when);
2348 if (!fr_cond_assert(ret >= 0)) { /* catastrophic error, trigger event loop exit */
2349 el->exit = 1;
2350 return;
2351 }
2352
2353 EVENT_DEBUG("%p - %s - Serviced %u timer(s)", el, __FUNCTION__, (unsigned int)ret);
2354 }
2355
2356 now = etl->time();
2357
2358 /*
2359 * Run all of the post-processing events.
2360 */
2361 for (post = fr_dlist_head(&el->post_callbacks);
2362 post != NULL;
2363 post = fr_dlist_next(&el->post_callbacks, post)) {
2364 post->callback(el, now, post->uctx);
2365 }
2366}
2367
2368/** Signal an event loop exit with the specified code
2369 *
2370 * The event loop will complete its current iteration, and then exit with the specified code.
2371 *
2372 * @param[in] el to signal to exit.
2373 * @param[in] code for #fr_event_loop to return.
2374 */
/* NOTE(review): line 2375, "void fr_event_loop_exit(fr_event_list_t *el, int code)", is elided in this rendering */
2376{
2377 if (unlikely(!el)) return;
2378
 /*
  * will_exit is observed by fr_event_loop_exiting(); the loop itself
  * terminates on el->exit, so the current iteration completes first.
  */
2379 el->will_exit = code;
2380}
2381
2382/** Check to see whether the event loop is in the process of exiting
2383 *
2384 * @param[in] el to check.
2385 */
/* NOTE(review): line 2386, "bool fr_event_loop_exiting(fr_event_list_t *el)", is elided in this rendering */
2387{
 /* Either a pending exit request (will_exit) or a hard exit (exit) counts */
2388 return ((el->will_exit != 0) || (el->exit != 0));
2389}
2390
2391/** Run an event loop
2392 *
2393 * @note Will not return until #fr_event_loop_exit is called.
2394 *
2395 * @param[in] el to start processing.
2396 */
2397CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el)
2398{
 /* Reset any exit state left over from a previous run */
2399 el->will_exit = el->exit = 0;
2400
2401 el->dispatch = true;
2402 while (!el->exit) {
 /*
  * NOTE(review): the parenthesis placement wraps only the call in
  * unlikely(); the comparison "< 0" is applied to the macro's result.
  * Behaviour is the same, but the branch hint likely belongs around
  * the whole comparison -- confirm against upstream.
  */
2403 if (unlikely(fr_event_corral(el, el->pub.tl->time(), true)) < 0) break;
 /* NOTE(review): line 2404, presumably "fr_event_service(el);", is elided in this rendering */
2405 }
2406
2407 /*
2408 * Give processes five seconds to exit.
2409 * This means any triggers that we may
2410 * have issued when the server exited
2411 * have a chance to complete.
2412 */
 /* NOTE(review): line 2413 (presumably a fr_event_list_reap_signal() call) is elided in this rendering */
2414 el->dispatch = false;
2415
2416 return el->exit;
2417}
2418
2419/** Cleanup an event list
2420 *
2421 * Frees/destroys any resources associated with an event list
2422 *
2423 * @param[in] el to free resources for.
2424 */
/* NOTE(review): line 2425, "static int _event_list_free(fr_event_list_t *el)", is elided in this rendering (talloc destructor, see talloc_set_destructor() in fr_event_list_alloc) */
2426{
2428
 /* Free registered events first, so their destructors can still deregister from the kqueue */
2429 talloc_free_children(el);
2430
 /* kq is initialised to -1 in fr_event_list_alloc() so this destructor is safe before kqueue() succeeds */
2431 if (el->kq >= 0) close(el->kq);
2432
2433 return 0;
2434}
2435
2436/** Free any memory we allocated for indexes
2437 *
2438 */
2439static int _event_free_indexes(UNUSED void *uctx)
2440{
2441 unsigned int i;
2442
2443 for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) if (talloc_free(filter_maps[i].ev_to_func) < 0) return -1;
2444 return 0;
2445}
2446
/** Build the event-to-function indexes on first use
 *
 * Registered via fr_atexit_global_once_ret() in fr_event_list_alloc(),
 * paired with _event_free_indexes().
 */
2447static int _event_build_indexes(UNUSED void *uctx)
2448{
2449 unsigned int i;
2450
 /*
  * NOTE(review): line 2451 is elided in this rendering -- presumably a loop
  * over filter_maps calling event_fd_func_index_build(); confirm against the
  * original source.
  */
2452 return 0;
2453}
2454
2455#ifdef EVFILT_LIBKQUEUE
2456/** kqueue logging wrapper function
2457 *
2458 */
2459static CC_HINT(format (printf, 1, 2)) CC_HINT(nonnull)
2460void _event_kqueue_log(char const *fmt, ...)
2461{
2462 va_list ap;
2463
2464 va_start(ap, fmt);
2465 fr_vlog(&default_log, L_DBG, __FILE__, __LINE__, fmt, ap);
2466 va_end(ap);
2467}
2468
2469/** If we're building with libkqueue, and at debug level 4 or higher, enable libkqueue debugging output
2470 *
2471 * This requires a debug build of libkqueue
2472 */
2473static int _event_kqueue_logging(UNUSED void *uctx)
2474{
2475 struct kevent kev, receipt;
2476
2477 log_conf_kq = kqueue();
2478 if (unlikely(log_conf_kq < 0)) {
2479 fr_strerror_const("Failed initialising logging configuration kqueue");
2480 return -1;
2481 }
2482
2483 EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, (intptr_t)_event_kqueue_log, NULL);
2484 if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
2485 close(log_conf_kq);
2486 log_conf_kq = -1;
2487 return 1;
2488 }
2489
2490 if (fr_debug_lvl >= L_DBG_LVL_3) {
2491 EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG, 1, NULL);
2492 if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
2493 fr_strerror_const("Failed enabling libkqueue debug logging");
2494 close(log_conf_kq);
2495 log_conf_kq = -1;
2496 return -1;
2497 }
2498 }
2499
2500 return 0;
2501}
2502
2503static int _event_kqueue_logging_stop(UNUSED void *uctx)
2504{
2505 struct kevent kev, receipt;
2506
2507 EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, 0, NULL);
2508 (void)kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){});
2509
2510 close(log_conf_kq);
2511 log_conf_kq = -1;
2512
2513 return 0;
2514}
2515#endif
2516
2517/** Initialise a new event list
2518 *
2519 * @param[in] ctx to allocate memory in.
2520 * @param[in] status callback, called on each iteration of the event list.
2521 * @param[in] status_uctx context for the status callback
2522 * @return
2523 * - A pointer to a new event list on success (free with talloc_free).
2524 * - NULL on error.
2525 */
2526fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
2527{
 /* NOTE(review): line 2528, presumably "fr_event_list_t *el;", is elided in this rendering */
2529 struct kevent kev;
2530 int ret;
2531
2532 /*
2533 * Build the map indexes the first time this
2534 * function is called.
2535 */
2536 fr_atexit_global_once_ret(&ret, _event_build_indexes, _event_free_indexes, NULL);
2537#ifdef EVFILT_LIBKQUEUE
2538 fr_atexit_global_once_ret(&ret, _event_kqueue_logging, _event_kqueue_logging_stop, NULL);
2539#endif
2540
2541 el = talloc_zero(ctx, fr_event_list_t);
2542 if (!fr_cond_assert(el)) {
2543 fr_strerror_const("Out of memory");
2544 return NULL;
2545 }
2546 el->kq = -1; /* So destructor can be used before kqueue() provides us with fd */
2547 talloc_set_destructor(el, _event_list_free);
2548
 /* NOTE(review): line 2549 (timer list allocation, presumably via fr_timer_list_lst_alloc()) is elided in this rendering */
2550 if (!el->pub.tl) {
2551 fr_strerror_const("Failed allocating timer list");
2552 error:
2553 talloc_free(el);
2554 return NULL;
2555 }
2556
 /* NOTE(review): line 2557 (FD rbtree allocation, presumably keyed by fr_event_fd_cmp) is elided in this rendering */
2558 if (!el->fds) {
2559 fr_strerror_const("Failed allocating FD tree");
2560 goto error;
2561 }
2562
2563 el->kq = kqueue();
2564 if (el->kq < 0) {
2565 fr_strerror_printf("Failed allocating kqueue: %s", fr_syserror(errno));
2566 goto error;
2567 }
2568
 /* NOTE(review): lines 2569-2572 (presumably dlist initialisation for the callback/free lists) are elided in this rendering */
2573 if (status) (void) fr_event_pre_insert(el, status, status_uctx);
2574
2575 /*
2576 * Set our "exit" callback as ident 0.
2577 */
2578 EV_SET(&kev, 0, EVFILT_USER, EV_ADD | EV_CLEAR, NOTE_FFNOP, 0, NULL);
2579 if (kevent(el->kq, &kev, 1, NULL, 0, NULL) < 0) {
2580 fr_strerror_printf("Failed adding exit callback to kqueue: %s", fr_syserror(errno));
2581 goto error;
2582 }
2583
2584 return el;
2585}
2586
2587/** Return whether the event loop has any active events
2588 *
2589 */
2594#ifdef TESTING
2595/*
2596 * cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event
2597 *
2598 * ./event
2599 *
2600 * And hit CTRL-S to stop the output, CTRL-Q to continue.
2601 * It normally alternates printing the time and sleeping,
2602 * but when you hit CTRL-S/CTRL-Q, you should see a number
2603 * of events run right after each other.
2604 *
2605 * OR
2606 *
2607 * valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./event
2608 */
2609
2610static void print_time(void *ctx)
2611{
2612 fr_time_t when;
2613 int64_t usec;
2614
2615 when = *(fr_time_t *) ctx;
2616 usec = fr_time_to_usec(when);
2617
2618 printf("%d.%06d\n", usec / USEC, usec % USEC);
2619 fflush(stdout);
2620}
2621
2622static fr_randctx rand_pool;
2623
2624static uint32_t event_rand(void)
2625{
2626 uint32_t num;
2627
2628 num = rand_pool.randrsl[rand_pool.randcnt++];
2629 if (rand_pool.randcnt == 256) {
2630 fr_isaac(&rand_pool);
2631 rand_pool.randcnt = 0;
2632 }
2633
2634 return num;
2635}
2636
2637
2638#define MAX 100
/** TESTING harness entry point: schedule MAX randomly-spaced timers and run them
 *
 * See the build/usage instructions in the comment block above.
 */
2639int main(int argc, char **argv)
2640{
2641 int i, rcode;
2642 fr_time_t array[MAX];
2643 fr_time_t now, when;
 /* NOTE(review): line 2644, presumably the declaration of "el", is elided in this rendering */
2645
 /*
  * NOTE(review): fr_event_list_alloc() is declared above with three
  * parameters (ctx, status, status_uctx); this call passes only two --
  * confirm against the original source.
  */
2646 el = fr_event_list_alloc(NULL, NULL);
2647 if (!el) fr_exit_now(1);
2648
 /* Seed the ISAAC pool from the wall clock so each run differs */
2649 memset(&rand_pool, 0, sizeof(rand_pool));
2650 rand_pool.randrsl[1] = time(NULL);
2651
2652 fr_rand_init(&rand_pool, 1);
2653 rand_pool.randcnt = 0;
2654
 /* Build a monotonically increasing schedule of MAX timer fire times */
2655 array[0] = el->time();
2656 for (i = 1; i < MAX; i++) {
2657 array[i] = array[i - 1];
2658 array[i] += event_rand() & 0xffff;
2659
 /*
  * NOTE(review): print_time() dereferences its uctx as "fr_time_t *",
  * but array[i] appears to be passed by value here -- presumably the
  * original reads "&array[i]"; confirm against the original source.
  */
2660 fr_timer_at(NULL, el, array[i], false, print_time, array[i]);
2661 }
2662
 /* Drain the timer list, sleeping between batches of due timers */
2663 while (fr_event_list_num_timers(el)) {
2664 now = el->time();
2665 when = now;
2666 if (!fr_timer_run(el, &when)) {
2667 int delay = (when - now) / 1000; /* nanoseconds to microseconds */
2668
2669 printf("\tsleep %d microseconds\n", delay);
2670 fflush(stdout);
2671 usleep(delay);
2672 }
2673 }
2674
2675 talloc_free(el);
2676
2677 return 0;
2678}
2679#endif
int const char * file
Definition acutest.h:702
va_end(args)
static int const char * fmt
Definition acutest.h:573
int const char int line
Definition acutest.h:702
va_start(args, fmt)
#define RCSID(id)
Definition build.h:485
#define DIAG_UNKNOWN_PRAGMAS
Definition build.h:458
#define L(_str)
Helper for initialising arrays of string literals.
Definition build.h:209
#define typeof_field(_type, _field)
Typeof field.
Definition build.h:174
#define DIAG_ON(_x)
Definition build.h:460
#define CC_NO_UBSAN(_sanitize)
Definition build.h:428
#define CMP_RETURN(_a, _b, _field)
Return if the comparison is not 0 (is unequal)
Definition build.h:121
#define CMP(_a, _b)
Same as CMP_PREFER_SMALLER use when you don't really care about ordering, you just want an ordering.
Definition build.h:112
#define unlikely(_x)
Definition build.h:383
#define NDEBUG_LOCATION_VALS
Definition build.h:264
#define NDEBUG_LOCATION_ARGS
Pass caller information to the function.
Definition build.h:263
#define UNUSED
Definition build.h:317
#define NUM_ELEMENTS(_t)
Definition build.h:339
#define DIAG_OFF(_x)
Definition build.h:459
static int kq
#define fr_cond_assert(_x)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition debug.h:139
#define fr_assert_fail(_msg,...)
Calls panic_action ifndef NDEBUG, else logs error.
Definition debug.h:216
#define fr_cond_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition debug.h:156
#define fr_exit_now(_x)
Exit without calling atexit() handlers, producing a log message in debug builds.
Definition debug.h:234
int main(int argc, char **argv)
Definition dhcpclient.c:524
static void * fr_dlist_head(fr_dlist_head_t const *list_head)
Return the HEAD item of a list or NULL if the list is empty.
Definition dlist.h:486
static void * fr_dlist_remove(fr_dlist_head_t *list_head, void *ptr)
Remove an item from the list.
Definition dlist.h:638
static bool fr_dlist_entry_in_list(fr_dlist_t const *entry)
Check if a list entry is part of a list.
Definition dlist.h:163
static unsigned int fr_dlist_num_elements(fr_dlist_head_t const *head)
Return the number of elements in the dlist.
Definition dlist.h:939
static int fr_dlist_insert_tail(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the tail of a list.
Definition dlist.h:378
#define fr_dlist_talloc_init(_head, _type, _field)
Initialise the head structure of a doubly linked list.
Definition dlist.h:275
#define fr_dlist_foreach_safe(_list_head, _type, _iter)
Iterate over the contents of a list allowing for removals.
Definition dlist.h:108
static void * fr_dlist_next(fr_dlist_head_t const *list_head, void const *ptr)
Get the next item in a list.
Definition dlist.h:555
Head of a doubly linked list.
Definition dlist.h:51
Entry in a doubly linked list.
Definition dlist.h:41
#define fr_event_user_insert(_ctx, _ev_p, _el, _trigger, _callback, _uctx)
Definition event.h:280
fr_event_io_func_t io
Read/write functions.
Definition event.h:215
struct fr_event_user_s fr_event_user_t
An opaque user event handle.
Definition event.h:79
void(* fr_event_fd_cb_t)(fr_event_list_t *el, int fd, int flags, void *uctx)
Called when an IO event occurs on a file descriptor.
Definition event.h:151
@ FR_EVENT_OP_SUSPEND
Temporarily remove the relevant filter from kevent.
Definition event.h:91
@ FR_EVENT_OP_RESUME
Reinsert the filter into kevent.
Definition event.h:92
fr_event_filter_t
The type of filter to install for an FD.
Definition event.h:83
@ FR_EVENT_FILTER_VNODE
Filter for vnode subfilters.
Definition event.h:85
@ FR_EVENT_FILTER_IO
Combined filter for read/write functions/.
Definition event.h:84
size_t offset
Offset of function in func struct.
Definition event.h:98
fr_timer_list_t * tl
The timer list associated with this event loop.
Definition event.h:47
struct fr_event_pid fr_event_pid_t
An opaque PID status handle.
Definition event.h:75
fr_event_fd_cb_t read
Callback for when data is available.
Definition event.h:190
void(* fr_event_pid_cb_t)(fr_event_list_t *el, pid_t pid, int status, void *uctx)
Called when a child process has exited.
Definition event.h:170
void(* fr_event_error_cb_t)(fr_event_list_t *el, int fd, int flags, int fd_errno, void *uctx)
Called when an IO error event occurs on a file descriptor.
Definition event.h:161
int(* fr_event_status_cb_t)(fr_time_t now, fr_time_delta_t wake, void *uctx)
Called after each event loop cycle.
Definition event.h:142
void(* fr_event_post_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx)
Called when a post event fires.
Definition event.h:185
fr_event_op_t op
Operation to perform on function/filter.
Definition event.h:99
void(* fr_event_user_cb_t)(fr_event_list_t *el, void *uctx)
Called when a user kevent occurs.
Definition event.h:177
#define EVENT_DEBUG(...)
Definition event.h:66
Callbacks for the FR_EVENT_FILTER_IO filter.
Definition event.h:189
Public event list structure.
Definition event.h:46
Structure describing a modification to a filter's state.
Definition event.h:97
Callbacks for the FR_EVENT_FILTER_VNODE filter.
Definition event.h:196
Union of all filter functions.
Definition event.h:214
free(array)
void fr_isaac(fr_randctx *ctx)
Definition isaac.c:46
fr_dlist_head_t pre_callbacks
callbacks when we may be idle...
Definition event.c:395
void fr_event_service(fr_event_list_t *el)
Service any outstanding timer or file descriptor events.
Definition event.c:2197
fr_dlist_head_t post_callbacks
post-processing callbacks
Definition event.c:396
static fr_event_func_map_t filter_maps[]
Definition event.c:141
static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
Discover the type of a file descriptor.
Definition event.c:757
fr_event_func_map_entry_t * func_to_ev
Function -> Event maps coalesced, out of order.
Definition event.c:137
fr_event_error_cb_t error
Callback for when an error occurs on the FD.
Definition event.c:279
char const * file
Source file this event was last updated in.
Definition event.c:321
static int8_t fr_event_fd_cmp(void const *one, void const *two)
Compare two file descriptor handles.
Definition event.c:539
fr_event_pid_cb_t callback
callback to run when the child exits
Definition event.c:337
fr_event_funcs_t stored
Stored (set, but inactive) filter functions.
Definition event.c:277
static ssize_t fr_event_build_evset(UNUSED fr_event_list_t *el, struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active, fr_event_fd_t *ef, fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
Build a new evset based on function pointers present.
Definition event.c:624
fr_rb_tree_t * fds
Tree used to track FDs with filters in kqueue.
Definition event.c:382
bool is_registered
Whether this fr_event_fd_t's FD has been registered with kevent.
Definition event.c:283
char const * file
Source file this event was last updated in.
Definition event.c:296
fr_time_t fr_event_list_time(fr_event_list_t *el)
Get the current server time according to the event list.
Definition event.c:593
int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Delete a pre-event callback from the event list.
Definition event.c:1979
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:302
static void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
Saves some boilerplate...
Definition event.c:1615
int line
Line this event was last updated on.
Definition event.c:355
static int _event_fd_delete(fr_event_fd_t *ef)
Remove a file descriptor from the event loop and rbtree but don't explicitly free it.
Definition event.c:814
int _fr_event_pid_reap(NDEBUG_LOCATION_ARGS fr_event_list_t *el, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Asynchronously wait for a PID to exit, then reap it.
Definition event.c:1666
fr_event_filter_t filter
Definition event.c:269
#define FR_EVENT_FD_PCAP
Definition event.c:114
void * uctx
Context pointer to pass to each file descriptor callback.
Definition event.c:338
fr_event_status_cb_t callback
The callback to call.
Definition event.c:364
static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
Does the actual reaping of PIDs.
Definition event.c:1623
int line
Line this event was last updated on.
Definition event.c:322
static size_t kevent_filter_table_len
Definition event.c:87
struct fr_event_list_pub_s pub
Next event list in the chain.
Definition event.c:381
int _fr_event_user_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p, bool trigger, fr_event_user_cb_t callback, void *uctx)
Add a user callback to the event list.
Definition event.c:1885
fr_event_fd_type_t type
Type of events we're interested in.
Definition event.c:272
static fr_table_num_sorted_t const fr_event_fd_type_table[]
Definition event.c:252
static size_t fr_event_fd_type_table_len
Definition event.c:258
uint16_t flags
Flags to use for inserting event.
Definition event.c:128
waitpid(reap->pid_ev->pid, &status, 0)
fr_event_pid_cb_t callback
callback to run when the child exits
Definition event.c:310
static int _event_list_free(fr_event_list_t *el)
Cleanup an event list.
Definition event.c:2425
bool dispatch
Whether the event list is currently dispatching events.
Definition event.c:389
fr_dlist_head_t fd_to_free
File descriptor events pending deletion.
Definition event.c:406
bool coalesce
Coalesce this map with the next.
Definition event.c:131
fr_dlist_t entry
Entry in free list.
Definition event.c:289
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
Gather outstanding timer and file descriptor events.
Definition event.c:2062
static int _event_free_indexes(UNUSED void *uctx)
Free any memory we allocated for indexes.
Definition event.c:2439
fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
Returns the appropriate callback function for a given event.
Definition event.c:1264
void * uctx
Context for the callback.
Definition event.c:374
bool is_registered
Whether this user event has been registered with the event loop.
Definition event.c:347
return processed
Definition event.c:1827
int type
Type this filter applies to.
Definition event.c:130
uint64_t fr_event_list_num_timers(fr_event_list_t *el)
Return the number of timer events currently scheduled.
Definition event.c:563
fr_event_func_map_t const * map
Function map between fr_event_funcs_t and kevent filters.
Definition event.c:281
void * uctx
Context for the callback.
Definition event.c:365
fr_event_post_cb_t callback
The callback to call.
Definition event.c:373
int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Insert a PID event into an event list.
Definition event.c:1439
char const * name
Name of the event.
Definition event.c:126
int fr_event_user_trigger(fr_event_list_t *el, fr_event_user_t *ev)
Trigger a user event.
Definition event.c:1931
int line
Line this event was last updated on.
Definition event.c:297
uintptr_t armour
protection flag from being deleted.
Definition event.c:292
fr_event_user_cb_t callback
The callback to call.
Definition event.c:350
talloc_free(reap)
int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Unarmour an FD.
Definition event.c:1318
int sock_type
The type of socket SOCK_STREAM, SOCK_RAW etc...
Definition event.c:274
struct fr_event_pid::@130 early_exit
Fields that are only used if we're being triggered by a user event.
fr_dlist_head_t pid_to_reap
A list of all orphaned child processes we're waiting to reap.
Definition event.c:398
uint64_t fr_event_list_num_fds(fr_event_list_t *el)
Return the number of file descriptors is_registered with this event loop.
Definition event.c:551
int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx)
Delete a post-event callback from the event list.
Definition event.c:2033
void * uctx
Context pointer to pass to each file descriptor callback.
Definition event.c:286
fr_event_func_idx_type_t idx_type
What type of index we use for event to function mapping.
Definition event.c:135
#define GET_FUNC(_ef, _offset)
void * fr_event_fd_uctx(fr_event_fd_t *ef)
Returns the uctx associated with an fr_event_fd_t handle.
Definition event.c:1272
static fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
Figure out which function to call given a kevent.
Definition event.c:495
static int _fr_event_reap_free(fr_event_pid_reap_t *reap)
Definition event.c:1636
int kq
instance associated with this event list.
Definition event.c:393
pid_t pid
child to wait for
Definition event.c:307
static void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
Evaluate a EVFILT_PROC event.
Definition event.c:1361
int fr_event_list_kq(fr_event_list_t *el)
Return the kq associated with an event list.
Definition event.c:575
void * uctx
Context for the callback.
Definition event.c:351
bool is_registered
Whether this user event has been registered with the event loop.
Definition event.c:304
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:345
int will_exit
Will exit on next call to fr_event_corral.
Definition event.c:384
bool fr_event_list_empty(fr_event_list_t *el)
Return whether the event loop has any active events.
Definition event.c:2590
static int _event_build_indexes(UNUSED void *uctx)
Definition event.c:2447
unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal)
Send a signal to all the processes we have in our reap list, and reap them.
Definition event.c:1702
int16_t filter
Filter to apply.
Definition event.c:127
fr_event_list_t * fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
Initialise a new event list.
Definition event.c:2526
static void event_fd_func_index_build(fr_event_func_map_t *map)
Definition event.c:413
static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
Placeholder callback to avoid branches in service loop.
Definition event.c:603
fr_dlist_t entry
If the fr_event_pid is in the detached, reap state, it's inserted into a list associated with the eve...
Definition event.c:332
bool fr_event_loop_exiting(fr_event_list_t *el)
Check to see whether the event loop is in the process of exiting.
Definition event.c:2386
fr_dlist_t entry
Linked list of callback.
Definition event.c:363
int _fr_event_filter_update(NDEBUG_LOCATION_ARGS fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
Suspend/resume a subset of filters.
Definition event.c:946
char const * file
Source file this event was last updated in.
Definition event.c:354
int num_fd_events
Number of events in this event list.
Definition event.c:391
int _fr_event_fd_move(NDEBUG_LOCATION_ARGS fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
Move a file descriptor event from one event list to another.
Definition event.c:895
fr_event_func_map_entry_t ** ev_to_func
Function -> Event maps in index order.
Definition event.c:138
int _fr_event_fd_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_fd_cb_t read_fn, fr_event_fd_cb_t write_fn, fr_event_error_cb_t error, void *uctx)
Associate I/O callbacks with a file descriptor.
Definition event.c:1179
fr_event_fd_type_t
Definition event.c:93
@ FR_EVENT_FD_FILE
is a file.
Definition event.c:95
@ FR_EVENT_FD_DIRECTORY
is a directory.
Definition event.c:96
@ FR_EVENT_FD_SOCKET
is a socket.
Definition event.c:94
fr_event_pid_t const * pid_ev
pid_ev this reaper is bound to.
Definition event.c:330
fr_event_funcs_t active
Active filter functions.
Definition event.c:276
int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Add a pre-event callback to the event list.
Definition event.c:1957
static void _fr_event_pid_early_exit(fr_event_list_t *el, void *uctx)
Called on the next loop through the event loop when inserting an EVFILT_PROC event fails.
Definition event.c:1408
static void event_user_eval(fr_event_list_t *el, struct kevent *kev)
Definition event.c:1857
int exit
If non-zero event loop will prevent the addition of new events, and will return immediately from the ...
Definition event.c:385
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:268
static fr_table_num_sorted_t const kevent_filter_table[]
Definition event.c:70
TALLOC_CTX * linked_ctx
talloc ctx this event was bound to.
Definition event.c:287
static void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
Definition event.c:2183
void fr_event_loop_exit(fr_event_list_t *el, int code)
Signal an event loop exit with the specified code.
Definition event.c:2375
#define FR_EV_BATCH_FDS
Definition event.c:60
void * uctx
Context pointer to pass to each file descriptor callback.
Definition event.c:311
static int _event_pid_free(fr_event_pid_t *ev)
Remove PID wait event from kevent if the fr_event_pid_t is freed.
Definition event.c:1340
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:329
int fd
File descriptor we're listening for events on.
Definition event.c:270
size_t offset
Offset of function pointer in structure.
Definition event.c:125
int fr_event_fd_delete(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Remove a file descriptor from the event loop.
Definition event.c:1206
fr_dlist_t entry
Linked list of callback.
Definition event.c:372
int fr_event_loop(fr_event_list_t *el)
Run an event loop.
Definition event.c:2397
fr_event_fd_t * fr_event_fd_handle(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Get the opaque event handle from a file descriptor.
Definition event.c:1242
fr_rb_node_t node
Entry in the tree of file descriptor handles.
Definition event.c:264
int _fr_event_filter_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_filter_t filter, void *funcs, fr_event_error_cb_t error, void *uctx)
Insert a filter for the specified fd.
Definition event.c:1023
#define NOTE_EXITSTATUS
int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx)
Add a post-event callback to the event list.
Definition event.c:2011
fr_event_pid_t const ** parent
Definition event.c:308
static int _event_user_delete(fr_event_user_t *ev)
Memory will not be freed if we fail to remove the event from the kqueue.
Definition event.c:1839
struct kevent events[FR_EV_BATCH_FDS]
Definition event.c:401
fr_event_func_idx_type_t
Definition event.c:103
@ FR_EVENT_FUNC_IDX_FILTER
Sign flip is performed i.e. -1 = 0The filter is used / as the index in the ev to func index.
Definition event.c:106
@ FR_EVENT_FUNC_IDX_NONE
Definition event.c:104
@ FR_EVENT_FUNC_IDX_FFLAGS
The bit position of the flags in FFLAGS is used to provide the index.
Definition event.c:108
int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Armour an FD.
Definition event.c:1288
bool in_handler
Deletes should be deferred until after the handlers complete.
Definition event.c:403
uint32_t fflags
fflags to pass to filter.
Definition event.c:129
A file descriptor/filter event.
Definition event.c:263
Specifies a mapping between a function pointer in a structure and its respective event.
Definition event.c:124
Stores all information relating to an event list.
Definition event.c:380
Hold additional information for automatically reaped PIDs.
Definition event.c:328
Callbacks to perform after all timers and FDs have been checked.
Definition event.c:371
Callbacks to perform when the event handler is about to check the events.
Definition event.c:362
Callbacks for kevent() user events.
Definition event.c:344
int fr_debug_lvl
Definition log.c:43
fr_log_t default_log
Definition log.c:291
void fr_vlog(fr_log_t const *log, fr_log_type_t type, char const *file, int line, char const *fmt, va_list ap)
Send a server log message to its destination.
Definition log.c:344
@ L_DBG_LVL_3
3rd highest priority debug messages (-xxx | -Xx).
Definition log.h:72
@ L_DBG
Only displayed when debugging is enabled.
Definition log.h:59
static uint8_t fr_high_bit_pos(uint64_t num)
Find the highest order high bit in an unsigned 64 bit integer.
Definition math.h:36
unsigned short uint16_t
unsigned int uint32_t
long int ssize_t
unsigned char uint8_t
unsigned long int size_t
#define fr_assert(_expr)
Definition rad_assert.h:38
void fr_rand_init(void)
Definition rand.c:34
uint32_t randrsl[256]
Definition rand.h:40
uint32_t randcnt
Definition rand.h:39
uint32_t fr_rb_num_elements(fr_rb_tree_t *tree)
Return how many nodes there are in a tree.
Definition rb.c:781
void * fr_rb_find(fr_rb_tree_t const *tree, void const *data)
Find an element in the tree, returning the data, not the node.
Definition rb.c:577
bool fr_rb_insert(fr_rb_tree_t *tree, void const *data)
Insert data into a tree.
Definition rb.c:626
bool fr_rb_delete(fr_rb_tree_t *tree, void const *data)
Remove node and free data (if a free function was specified)
Definition rb.c:741
#define fr_rb_inline_talloc_alloc(_ctx, _type, _field, _data_cmp, _data_free)
Allocs a red black that verifies elements are of a specific talloc type.
Definition rb.h:246
The main red black tree structure.
Definition rb.h:73
return count
Definition module.c:163
char const * fr_syserror(int num)
Guaranteed to be thread-safe version of strerror.
Definition syserror.c:243
#define fr_table_str_by_value(_table, _number, _def)
Convert an integer to a string.
Definition table.h:772
An element in a lexicographically sorted array of name to num mappings.
Definition table.h:49
int talloc_link_ctx(TALLOC_CTX *parent, TALLOC_CTX *child)
Link two different parent and child contexts, so the child is freed before the parent.
Definition talloc.c:171
#define fr_time_delta_to_timespec(_delta)
Convert a delta to a timespec.
Definition time.h:666
static fr_time_delta_t fr_time_delta_from_sec(int64_t sec)
Definition time.h:590
#define fr_time_delta_wrap(_time)
Definition time.h:152
#define fr_time_wrap(_time)
Definition time.h:145
#define fr_time_lteq(_a, _b)
Definition time.h:240
#define fr_time_delta_ispos(_a)
Definition time.h:290
#define fr_time_eq(_a, _b)
Definition time.h:241
static int64_t fr_time_to_usec(fr_time_t when)
Convert an fr_time_t (internal time) to number of usec since the unix epoch (wallclock time)
Definition time.h:701
#define fr_time_add(_a, _b)
Add a time/time delta together.
Definition time.h:196
#define fr_time_gt(_a, _b)
Definition time.h:237
#define USEC
Definition time.h:380
#define fr_time_sub(_a, _b)
Subtract one time from another.
Definition time.h:229
#define fr_time_neq(_a, _b)
Definition time.h:242
A time delta, a difference in time measured in nanoseconds.
Definition time.h:80
"server local" time.
Definition time.h:69
int fr_timer_list_run(fr_timer_list_t *tl, fr_time_t *when)
Execute any pending events in the event loop.
Definition timer.c:815
uint64_t fr_timer_list_num_events(fr_timer_list_t *tl)
Return number of pending events.
Definition timer.c:965
fr_time_t fr_timer_list_when(fr_timer_list_t *tl)
Return the time of the next event.
Definition timer.c:979
fr_timer_list_t * fr_timer_list_lst_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent)
Allocate a new lst based timer list.
Definition timer.c:1050
An event timer list.
Definition timer.c:53
A timer event.
Definition timer.c:79
#define fr_timer_at(...)
Definition timer.h:80
close(uq->fd)
static fr_event_list_t * el
void fr_strerror_clear(void)
Clears all pending messages from the talloc pools.
Definition strerror.c:577
#define fr_strerror_printf(_fmt,...)
Log to thread local error buffer.
Definition strerror.h:64
#define fr_strerror_printf_push(_fmt,...)
Add a message to an existing stack of messages at the tail.
Definition strerror.h:84
#define fr_strerror_const(_msg)
Definition strerror.h:223
int nonnull(2, 5))
static size_t char ** out
Definition value.h:1012