The FreeRADIUS server $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
Loading...
Searching...
No Matches
event.c
Go to the documentation of this file.
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15 */
16
17/** Wrapper around libkqueue to make managing events easier
18 *
19 * Non-thread-safe event handling specific to FreeRADIUS.
20 *
21 * By non-thread-safe we mean multiple threads can't insert/delete
22 * events concurrently into the same event list without synchronization.
23 *
24 * @file src/lib/util/event.c
25 *
26 * @copyright 2007-2016 The FreeRADIUS server project
27 * @copyright 2016 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
28 * @copyright 2007 Alan DeKok (aland@freeradius.org)
29 */
30RCSID("$Id: 4f49c150637721b0728c3aa16dbeac697f39ddfb $")
31
32#define _EVENT_LIST_PRIVATE 1
34
35#include <freeradius-devel/util/dlist.h>
36#include <freeradius-devel/util/event.h>
37#include <freeradius-devel/util/log.h>
38#include <freeradius-devel/util/rb.h>
39#include <freeradius-devel/util/strerror.h>
40#include <freeradius-devel/util/syserror.h>
41#include <freeradius-devel/util/token.h>
42#include <freeradius-devel/util/atexit.h>
43
44#include <sys/stat.h>
45#include <sys/wait.h>
46
47#ifdef NDEBUG
48/*
49 * Turn off documentation warnings as file/line
50 * args aren't used for non-debug builds.
51 */
53DIAG_OFF(documentation)
55#endif
56
57#define FR_EV_BATCH_FDS (256)
58
59DIAG_OFF(unused-macros)
60#define fr_time() static_assert(0, "Use el->time for event loop timing")
61DIAG_ON(unused-macros)
62
63#if !defined(SO_GET_FILTER) && defined(SO_ATTACH_FILTER)
64# define SO_GET_FILTER SO_ATTACH_FILTER
65#endif
66
68#ifdef EVFILT_AIO
69 { L("EVFILT_AIO"), EVFILT_AIO },
70#endif
71#ifdef EVFILT_EXCEPT
72 { L("EVFILT_EXCEPT"), EVFILT_EXCEPT },
73#endif
74#ifdef EVFILT_MACHPORT
75 { L("EVFILT_MACHPORT"), EVFILT_MACHPORT },
76#endif
77 { L("EVFILT_PROC"), EVFILT_PROC },
78 { L("EVFILT_READ"), EVFILT_READ },
79 { L("EVFILT_SIGNAL"), EVFILT_SIGNAL },
80 { L("EVFILT_TIMER"), EVFILT_TIMER },
81 { L("EVFILT_VNODE"), EVFILT_VNODE },
82 { L("EVFILT_WRITE"), EVFILT_WRITE }
83};
85
86#ifdef EVFILT_LIBKQUEUE
87static int log_conf_kq;
88#endif
89
/** What type of object a file descriptor refers to
 *
 * Values are single bits so they can be OR'd into the
 * fr_event_func_map_entry_t.type mask.
 *
 * NOTE(review): the PCAP enumerator and closing typedef were lost in
 * extraction; reconstructed — verify against upstream.
 */
typedef enum {
	FR_EVENT_FD_SOCKET	= 1,	//!< is a socket.
	FR_EVENT_FD_FILE	= 2,	//!< is a file.
	FR_EVENT_FD_DIRECTORY	= 4,	//!< is a directory.

#ifdef SO_GET_FILTER
	FR_EVENT_FD_PCAP	= 8,	//!< is a socket with a packet filter attached.
#endif
} fr_event_fd_type_t;
/** How a kevent is mapped back to the function (callback) index for an event
 *
 * NOTE(review): the NONE enumerator and closing typedef were lost in
 * extraction; reconstructed — verify against upstream.  The original
 * FILTER comment was also garbled; rewritten from the code in
 * event_fd_func(), which uses ~filter as the array index.
 */
typedef enum {
	FR_EVENT_FUNC_IDX_NONE = 0,		//!< Filter cannot be mapped to a function index.

	FR_EVENT_FUNC_IDX_FILTER,		//!< The filter is used as the index into the
						///< ev to func map, after a sign flip
						///< (i.e. filter -1 becomes index 0).
	FR_EVENT_FUNC_IDX_FFLAGS		//!< The bit position of the flags in FFLAGS
						///< is used to provide the index.
						///< i.e. 0x01 -> 0, 0x02 -> 1, 0x08 -> 3 etc..
} fr_event_func_idx_type_t;
110#ifndef SO_GET_FILTER
111# define FR_EVENT_FD_PCAP 0
112#endif
113
114/** Specifies a mapping between a function pointer in a structure and its respective event
115 *
116 * If the function pointer at the specified offset is set, then a matching event
117 * will be added.
118 *
119 * If the function pointer is NULL, then any existing events will be removed.
120 */
/* NOTE(review): closing "} fr_event_func_map_entry_t;" was lost in extraction;
 * the typedef name is proven by its uses below (func_to_ev, ev_to_func). */
typedef struct {
	size_t		offset;			//!< Offset of function pointer in structure.
	char const	*name;			//!< Name of the event.
	int16_t		filter;			//!< Filter to apply.
	uint16_t	flags;			//!< Flags to use for inserting event.
	uint32_t	fflags;			//!< fflags to pass to filter.
	int		type;			//!< Mask of #fr_event_fd_type_t bits this filter applies to.
	bool		coalesce;		//!< Coalesce this map with the next.
} fr_event_func_map_entry_t;
131typedef struct {
132 fr_event_func_idx_type_t idx_type; //!< What type of index we use for
133 ///< event to function mapping.
134 fr_event_func_map_entry_t *func_to_ev; //!< Function -> Event maps coalesced, out of order.
135 fr_event_func_map_entry_t **ev_to_func; //!< Function -> Event maps in index order.
137
141 .func_to_ev = (fr_event_func_map_entry_t[]){
142 {
143 .offset = offsetof(fr_event_io_func_t, read),
144 .name = "read",
145 .filter = EVFILT_READ,
146 .flags = EV_ADD | EV_ENABLE,
147#ifdef NOTE_NONE
148 .fflags = NOTE_NONE,
149#else
150 .fflags = 0,
151#endif
153 },
154 {
155 .offset = offsetof(fr_event_io_func_t, write),
156 .name = "write",
157 .filter = EVFILT_WRITE,
158 .flags = EV_ADD | EV_ENABLE,
159 .fflags = 0,
161 },
162 { 0 }
163 }
164 },
166 .idx_type = FR_EVENT_FUNC_IDX_FFLAGS,
167 .func_to_ev = (fr_event_func_map_entry_t[]){
168 {
169 .offset = offsetof(fr_event_vnode_func_t, delete),
170 .name = "delete",
171 .filter = EVFILT_VNODE,
172 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
173 .fflags = NOTE_DELETE,
175 .coalesce = true
176 },
177 {
178 .offset = offsetof(fr_event_vnode_func_t, write),
179 .name = "write",
180 .filter = EVFILT_VNODE,
181 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
182 .fflags = NOTE_WRITE,
183 .type = FR_EVENT_FD_FILE,
184 .coalesce = true
185 },
186 {
187 .offset = offsetof(fr_event_vnode_func_t, extend),
188 .name = "extend",
189 .filter = EVFILT_VNODE,
190 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
191 .fflags = NOTE_EXTEND,
193 .coalesce = true
194 },
195 {
196 .offset = offsetof(fr_event_vnode_func_t, attrib),
197 .name = "attrib",
198 .filter = EVFILT_VNODE,
199 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
200 .fflags = NOTE_ATTRIB,
201 .type = FR_EVENT_FD_FILE,
202 .coalesce = true
203 },
204 {
205 .offset = offsetof(fr_event_vnode_func_t, link),
206 .name = "link",
207 .filter = EVFILT_VNODE,
208 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
209 .fflags = NOTE_LINK,
210 .type = FR_EVENT_FD_FILE,
211 .coalesce = true
212 },
213 {
214 .offset = offsetof(fr_event_vnode_func_t, rename),
215 .name = "rename",
216 .filter = EVFILT_VNODE,
217 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
218 .fflags = NOTE_RENAME,
219 .type = FR_EVENT_FD_FILE,
220 .coalesce = true
221 },
222#ifdef NOTE_REVOKE
223 {
224 .offset = offsetof(fr_event_vnode_func_t, revoke),
225 .name = "revoke",
226 .filter = EVFILT_VNODE,
227 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
228 .fflags = NOTE_REVOKE,
229 .type = FR_EVENT_FD_FILE,
230 .coalesce = true
231 },
232#endif
233#ifdef NOTE_FUNLOCK
234 {
235 .offset = offsetof(fr_event_vnode_func_t, funlock),
236 .name = "funlock",
237 .filter = EVFILT_VNODE,
238 .flags = EV_ADD | EV_ENABLE | EV_CLEAR,
239 .fflags = NOTE_FUNLOCK,
240 .type = FR_EVENT_FD_FILE,
241 .coalesce = true
242 },
243#endif
244 { 0 }
245 }
246 }
247};
248
250 { L("directory"), FR_EVENT_FD_DIRECTORY },
251 { L("file"), FR_EVENT_FD_FILE },
252 { L("pcap"), FR_EVENT_FD_PCAP },
253 { L("socket"), FR_EVENT_FD_SOCKET }
254};
256
257/** A file descriptor/filter event
258 *
259 */
261 fr_rb_node_t node; //!< Entry in the tree of file descriptor handles.
262 ///< this should really go away and we should pass around
263 ///< handles directly.
264
265 fr_event_list_t *el; //!< Event list this event belongs to.
267 int fd; //!< File descriptor we're listening for events on.
268
269 fr_event_fd_type_t type; //!< Type of events we're interested in.
270
271 int sock_type; //!< The type of socket SOCK_STREAM, SOCK_RAW etc...
272
273 fr_event_funcs_t active; //!< Active filter functions.
274 fr_event_funcs_t stored; //!< Stored (set, but inactive) filter functions.
275
276 fr_event_error_cb_t error; //!< Callback for when an error occurs on the FD.
277
278 fr_event_func_map_t const *map; //!< Function map between #fr_event_funcs_t and kevent filters.
279
280 bool is_registered; //!< Whether this fr_event_fd_t's FD has been registered with
281 ///< kevent. Mostly for debugging.
282
283 void *uctx; //!< Context pointer to pass to each file descriptor callback.
284 TALLOC_CTX *linked_ctx; //!< talloc ctx this event was bound to.
285
286 fr_dlist_t entry; //!< Entry in free list.
287
288#ifndef NDEBUG
289 uintptr_t armour; //!< protection flag from being deleted.
290#endif
291
292#ifndef NDEBUG
293 char const *file; //!< Source file this event was last updated in.
294 int line; //!< Line this event was last updated on.
295#endif
296};
297
299 fr_event_list_t *el; //!< Event list this event belongs to.
300
301 bool is_registered; //!< Whether this user event has been registered
302 ///< with the event loop.
303
304 pid_t pid; //!< child to wait for
306
307 fr_event_pid_cb_t callback; //!< callback to run when the child exits
308 void *uctx; //!< Context pointer to pass to each file descriptor callback.
309
310 /** Fields that are only used if we're being triggered by a user event
311 */
312 struct {
313 fr_event_user_t *ev; //!< Fallback user event we use to raise a PID event when
314 ///< a race occurs with kevent.
315 int status; //!< Status we got from waitid.
317#ifndef NDEBUG
318 char const *file; //!< Source file this event was last updated in.
319 int line; //!< Line this event was last updated on.
320#endif
321};
322
323/** Hold additional information for automatically reaped PIDs
324 */
325typedef struct {
326 fr_event_list_t *el; //!< Event list this event belongs to.
327 fr_event_pid_t const *pid_ev; //!< pid_ev this reaper is bound to.
328
329 fr_dlist_t entry; //!< If the fr_event_pid is in the detached, reap state,
330 ///< it's inserted into a list associated with the event.
331 //!< We then send SIGKILL, and forcefully reap the process
332 ///< on exit.
333
334 fr_event_pid_cb_t callback; //!< callback to run when the child exits
335 void *uctx; //!< Context pointer to pass to each file descriptor callback.
337
338/** Callbacks for kevent() user events
339 *
340 */
342 fr_event_list_t *el; //!< Event list this event belongs to.
343
344 bool is_registered; //!< Whether this user event has been registered
345 ///< with the event loop.
346
347 fr_event_user_cb_t callback; //!< The callback to call.
348 void *uctx; //!< Context for the callback.
349
350#ifndef NDEBUG
351 char const *file; //!< Source file this event was last updated in.
352 int line; //!< Line this event was last updated on.
353#endif
354};
355
356/** Callbacks to perform when the event handler is about to check the events
357 *
358 */
359typedef struct {
360 fr_dlist_t entry; //!< Linked list of callback.
361 fr_event_status_cb_t callback; //!< The callback to call.
362 void *uctx; //!< Context for the callback.
364
365/** Callbacks to perform after all timers and FDs have been checked
366 *
367 */
368typedef struct {
369 fr_dlist_t entry; //!< Linked list of callback.
370 fr_event_post_cb_t callback; //!< The callback to call.
371 void *uctx; //!< Context for the callback.
373
374/** Stores all information relating to an event list
375 *
376 */
378 struct fr_event_list_pub_s pub; //!< Next event list in the chain.
379 fr_rb_tree_t *fds; //!< Tree used to track FDs with filters in kqueue.
380
381 int will_exit; //!< Will exit on next call to fr_event_corral.
382 int exit; //!< If non-zero event loop will prevent the addition
383 ///< of new events, and will return immediately
384 ///< from the corral/service function.
385
386 bool dispatch; //!< Whether the event list is currently dispatching events.
387
388 int num_fd_events; //!< Number of events in this event list.
389
390 int kq; //!< instance associated with this event list.
391
392 fr_dlist_head_t pre_callbacks; //!< callbacks when we may be idle...
393 fr_dlist_head_t post_callbacks; //!< post-processing callbacks
394
395 fr_dlist_head_t pid_to_reap; //!< A list of all orphaned child processes we're
396 ///< waiting to reap.
397
398 struct kevent events[FR_EV_BATCH_FDS]; /* so it doesn't go on the stack every time */
399
400 bool in_handler; //!< Deletes should be deferred until after the
401 ///< handlers complete.
402
403 fr_dlist_head_t fd_to_free; //!< File descriptor events pending deletion.
404
405#ifdef WITH_EVENT_DEBUG
406 fr_timer_t *report; //!< Report event.
407#endif
408};
409
411{
412 switch (map->idx_type) {
413 default:
414 return;
415
416 /*
417 * - Figure out the lowest filter value
418 * - Invert it
419 * - Allocate an array
420 * - Populate the array
421 */
423 {
424 int low = 0;
426
427 for (entry = map->func_to_ev; entry->name; entry++) if (entry->filter < low) low = entry->filter;
428
429 map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, ~low + 1);
430 if (unlikely(!map->ev_to_func)) abort();
431
432 for (entry = map->func_to_ev; entry->name; entry++) map->ev_to_func[~entry->filter] = entry;
433 }
434 break;
435
436 /*
437 * - Figure out the highest bit position
438 * - Allocate an array
439 * - Populate the array
440 */
442 {
443 uint8_t high = 0, pos;
445
446 for (entry = map->func_to_ev; entry->name; entry++) {
447 pos = fr_high_bit_pos(entry->fflags);
448 if (pos > high) high = pos;
449 }
450
451 map->ev_to_func = talloc_zero_array(NULL, fr_event_func_map_entry_t *, high);
452 if (unlikely(!map->ev_to_func)) abort();
453
454 for (entry = map->func_to_ev; entry->name; entry++) {
455 typeof_field(fr_event_func_map_entry_t, fflags) fflags = entry->fflags;
456
457 /*
458 * Multiple notes can be associated
459 * with the same function.
460 */
461 while ((pos = fr_high_bit_pos(fflags))) {
462 pos -= 1;
463 map->ev_to_func[pos] = entry;
464 /*
465 * Coverity thinks that after this decrement, pos
466 * can be 255 even though the loop condition precludes
467 * it. Adding a Coverity-only check won't change that,
468 * so we're stuck with annotation.
469 */
470 /* coverity[overflow_const] */
471 fflags &= ~(1 << pos);
472 }
473 }
474 }
475 break;
476 }
477}
478
479/** Figure out which function to call given a kevent
480 *
481 * This function should be called in a loop until it returns NULL.
482 *
483 * @param[in] ef File descriptor state handle.
484 * @param[in] filter from the kevent.
485 * @param[in,out] fflags from the kevent. Each call will return the function
486 * from the next most significant NOTE_*, with each
487 * NOTE_* before unset from fflags.
488 * @return
489 * - NULL there are no more callbacks to call.
490 * - The next callback to call.
491 */
492static inline CC_HINT(always_inline) fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
493{
494 fr_event_func_map_t const *map = ef->map;
495
496#define GET_FUNC(_ef, _offset) *((fr_event_fd_cb_t const *)((uint8_t const *)&(_ef)->active + _offset))
497
498 switch (map->idx_type) {
499 default:
500 fr_assert_fail("Invalid index type %u", map->idx_type);
501 return NULL;
502
504 {
505 int idx;
506
507 if (!*filter) return NULL;
508
509 idx = ~*filter; /* Consume the filter */
510 *filter = 0;
511
512 return GET_FUNC(ef, map->ev_to_func[idx]->offset);
513 }
514
516 {
517 int our_fflags = *fflags;
518 uint8_t pos = fr_high_bit_pos(our_fflags);
519
520 if (!pos) return NULL; /* No more fflags to consume */
521 pos -= 1; /* Saves an array element */
522
523 *fflags = our_fflags & ~(1 << pos); /* Consume the knote */
524
525 return GET_FUNC(ef, map->ev_to_func[pos]->offset);
526 }
527 }
528}
529
530/** Compare two file descriptor handles
531 *
532 * @param[in] one the first file descriptor handle.
533 * @param[in] two the second file descriptor handle.
534 * @return CMP(one, two)
535 */
536static int8_t fr_event_fd_cmp(void const *one, void const *two)
537{
538 fr_event_fd_t const *a = one, *b = two;
539
540 CMP_RETURN(a, b, fd);
541
542 return CMP(a->filter, b->filter);
543}
544
545/** Return the number of file descriptors is_registered with this event loop
546 *
547 */
549{
550 if (unlikely(!el)) return -1;
551
552 return fr_rb_num_elements(el->fds);
553}
554
555/** Return the number of timer events currently scheduled
556 *
557 * @param[in] el to return timer events for.
558 * @return number of timer events.
559 */
561{
562 if (unlikely(!el)) return -1;
563
565}
566
567/** Return the kq associated with an event list.
568 *
569 * @param[in] el to return timer events for.
570 * @return kq
571 */
573{
574 if (unlikely(!el)) return -1;
575
576 return el->kq;
577}
578
579/** Get the current server time according to the event list
580 *
581 * If the event list is currently dispatching events, we return the time
582 * this iteration of the event list started.
583 *
584 * If the event list is not currently dispatching events, we return the
585 * current system time.
586 *
587 * @param[in] el to get time from.
588 * @return the current time according to the event list.
589 */
591{
592 return el->pub.tl->time();
593}
594
595/** Placeholder callback to avoid branches in service loop
596 *
597 * This is set in place of any NULL function pointers, so that the event loop doesn't
598 * SEGV if a filter callback function is unset between corral and service.
599 */
600static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
601{
602 return;
603}
604
605/** Build a new evset based on function pointers present
606 *
607 * @note The contents of active functions may be inconsistent if this function errors. But the
608 * only time that will occur is if the caller passed invalid arguments.
609 *
610 * @param[in] el we're building events for.
611 * @param[out] out_kev where to write the evset.
612 * @param[in] outlen length of output buffer.
613 * @param[out] active The set of function pointers with active filters.
614 * @param[in] ef event to insert.
615 * @param[in] new Functions to map to filters.
616 * @param[in] prev Previous set of functions mapped to filters.
617 * @return
618 * - >= 0 the number of changes written to out.
619 * - < 0 an error occurred.
620 */
622#ifndef WITH_EVENT_DEBUG
623 UNUSED
624#endif
626 struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active,
627 fr_event_fd_t *ef,
628 fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
629{
630 struct kevent *out = out_kev, *end = out + outlen;
631 fr_event_func_map_entry_t const *map;
632 struct kevent add[10], *add_p = add;
633 size_t i;
634
635 EVENT_DEBUG("%p - Building new evset for FD %i (new %p, prev %p)", el, ef->fd, new, prev);
636
637 /*
638 * Iterate over the function map, setting/unsetting
639 * filters and filter flags.
640 */
641 for (map = ef->map->func_to_ev; map->name; map++) {
642 bool has_current_func = false;
643 bool has_prev_func = false;
644 uint32_t current_fflags = 0;
645 uint32_t prev_fflags = 0;
646
647 do {
648 fr_event_fd_cb_t prev_func;
649 fr_event_fd_cb_t new_func;
650
651 /*
652 * If the previous value was the 'noop'
653 * callback, it's identical to being unset.
654 */
655 prev_func = *(fr_event_fd_cb_t const *)((uint8_t const *)prev + map->offset);
656 if (prev_func && (prev_func != fr_event_fd_noop)) {
657 EVENT_DEBUG("\t%s prev set (%p)", map->name, prev_func);
658 prev_fflags |= map->fflags;
659 has_prev_func = true;
660 } else {
661 EVENT_DEBUG("\t%s prev unset", map->name);
662 }
663
664 new_func = *(fr_event_fd_cb_t const *)((uint8_t const *)new + map->offset);
665 if (new_func && (new_func != fr_event_fd_noop)) {
666 EVENT_DEBUG("\t%s curr set (%p)", map->name, new_func);
667 current_fflags |= map->fflags;
668 has_current_func = true;
669
670 /*
671 * Check the filter will work for the
672 * type of file descriptor specified.
673 */
674 if (!(map->type & ef->type)) {
675 fr_strerror_printf("kevent %s (%s), can't be applied to fd of type %s",
676 map->name,
679 map->type, "<INVALID>"));
680 return -1;
681 }
682
683 /*
684 * Mark this filter function as active
685 */
686 memcpy((uint8_t *)active + map->offset, (uint8_t const *)new + map->offset,
687 sizeof(fr_event_fd_cb_t));
688 } else {
689 EVENT_DEBUG("\t%s curr unset", map->name);
690
691 /*
692 * Mark this filter function as inactive
693 * by setting it to the 'noop' callback.
694 */
695 *((fr_event_fd_cb_t *)((uint8_t *)active + map->offset)) = fr_event_fd_noop;
696 }
697
698 if (!(map + 1)->coalesce) break;
699 map++;
700 } while (1);
701
702 if (out > end) {
703 fr_strerror_const("Out of memory to store kevent filters");
704 return -1;
705 }
706
707 /*
708 * Upsert if we add a function or change the flags.
709 */
710 if (has_current_func &&
711 (!has_prev_func || (current_fflags != prev_fflags))) {
712 if ((size_t)(add_p - add) >= (NUM_ELEMENTS(add))) {
713 fr_strerror_const("Out of memory to store kevent EV_ADD filters");
714 return -1;
715 }
716 EVENT_DEBUG("\tEV_SET EV_ADD filter %s (%i), flags %i, fflags %i",
718 map->filter, map->flags, current_fflags);
719 EV_SET(add_p++, ef->fd, map->filter, map->flags, current_fflags, 0, ef);
720
721 /*
722 * Delete if we remove a function.
723 */
724 } else if (!has_current_func && has_prev_func) {
725 EVENT_DEBUG("\tEV_SET EV_DELETE filter %s (%i), flags %i, fflags %i",
727 map->filter, EV_DELETE, 0);
728 EV_SET(out++, ef->fd, map->filter, EV_DELETE, 0, 0, ef);
729 }
730 }
731
732 /*
733 * kevent is fine with adds/deletes in the same operation
734 * on the same file descriptor, but libkqueue doesn't do
735 * any kind of coalescing or ordering so you get an EEXIST
736 * error.
737 */
738 for (i = 0; i < (size_t)(add_p - add); i++) memcpy(out++, &add[i], sizeof(*out));
739
740 return out - out_kev;
741}
742
743/** Discover the type of a file descriptor
744 *
745 * This function writes the result of the discovery to the ef->type,
746 * and ef->sock_type fields.
747 *
748 * @param[out] ef to write type data to.
749 * @param[in] fd to discover the type of.
750 * @return
751 * - 0 on success.
752 * - -1 on failure.
753 */
754static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
755{
756 socklen_t opt_len = sizeof(ef->sock_type);
757
758 /*
759 * It's a socket or PCAP socket
760 */
761 if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &ef->sock_type, &opt_len) == 0) {
762#ifdef SO_GET_FILTER
763 opt_len = 0;
764 if (unlikely(getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, NULL, &opt_len) < 0)) {
765 fr_strerror_printf("Failed determining PF status: %s", fr_syserror(errno));
766 return -1;
767 }
768 if (opt_len) {
770 } else
771#endif
772 {
774 }
775
776 /*
777 * It's a file or directory
778 */
779 } else {
780 struct stat buf;
781
782 if (errno != ENOTSOCK) {
783 fr_strerror_printf("Failed retrieving socket type: %s", fr_syserror(errno));
784 return -1;
785 }
786
787 if (fstat(fd, &buf) < 0) {
788 fr_strerror_printf("Failed calling stat() on file: %s", fr_syserror(errno));
789 return -1;
790 }
791
792 if (S_ISDIR(buf.st_mode)) {
794 } else {
796 }
797 }
798 ef->fd = fd;
799
800 return 0;
801}
802
803/** Remove a file descriptor from the event loop and rbtree but don't explicitly free it
804 *
805 *
806 * @param[in] ef to remove.
807 * @return
808 * - 0 on success.
809 * - -1 on error;
810 */
812{
813 struct kevent evset[10];
814 int count = 0;
815 fr_event_list_t *el = ef->el;
816 fr_event_funcs_t funcs;
817
818 /*
819 * Already been removed from the various trees and
820 * the event loop.
821 */
822 if (ef->is_registered) {
823 memset(&funcs, 0, sizeof(funcs));
824
825 fr_assert(ef->armour == 0);
826
827 /*
828 * If this fails, it's a pretty catastrophic error.
829 */
830 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
831 &ef->active, ef, &funcs, &ef->active);
832 if (count > 0) {
833 int ret;
834
835 /*
836 * If this fails, assert on debug builds.
837 */
838 ret = kevent(el->kq, evset, count, NULL, 0, NULL);
839 if (!fr_cond_assert_msg(ret >= 0,
840 "FD %i was closed without being removed from the KQ: %s",
841 ef->fd, fr_syserror(errno))) {
842 return -1; /* Prevent the free, and leave the fd in the trees */
843 }
844 }
845
846 fr_rb_delete(el->fds, ef);
847 ef->is_registered = false;
848 }
849
850 /*
851 * Insert into the deferred free list, event will be
852 * freed later.
853 */
854 if (el->in_handler) {
855 /*
856 * Don't allow the same event to be
857 * inserted into the free list multiple
858 * times.
859 *
860 * This can happen if the same ef is
861 * delivered by multiple filters, i.e.
862 * if EVFILT_READ and EVFILT_WRITE
863 * were both high, and both handlers
864 * attempted to delete the event
865 * we'd need to prevent the event being
866 * inserted into the free list multiple
867 * times.
868 */
870 return -1; /* Will be freed later */
871 } else if (fr_dlist_entry_in_list(&ef->entry)) {
873 }
874
875 return 0;
876}
877
878/** Move a file descriptor event from one event list to another
879 *
880 * FIXME - Move suspended events too.
881 *
882 * @note Any pending events will not be transferred.
883 *
884 * @param[in] dst Event list to move file descriptor event to.
885 * @param[in] src Event list to move file descriptor from.
886 * @param[in] fd of the event to move.
887 * @param[in] filter of the event to move.
888 * @return
889 * - 0 on success.
890 * - -1 on failure. The event will remain active in the src list.
891 */
893 fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
894{
895 fr_event_fd_t *ef;
896 int ret;
897
898 if (fr_event_loop_exiting(dst)) {
899 fr_strerror_const("Destination event loop exiting");
900 return -1;
901 }
902
903 /*
904 * Ensure this exists
905 */
906 ef = fr_rb_find(src->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
907 if (unlikely(!ef)) {
908 fr_strerror_printf("No events are registered for fd %i", fd);
909 return -1;
910 }
911
913 ef->linked_ctx, NULL,
914 dst, ef->fd, ef->filter, &ef->active, ef->error, ef->uctx);
915 if (ret < 0) return -1;
916
917 (void)fr_event_fd_delete(src, ef->fd, ef->filter);
918
919 return ret;
920}
921
922
923/** Suspend/resume a subset of filters
924 *
925 * This function trades producing useful errors for speed.
926 *
927 * An example of suspending the read filter for an FD would be:
928 @code {.c}
929 static fr_event_update_t pause_read[] = {
930 FR_EVENT_SUSPEND(fr_event_io_func_t, read),
931 { 0 }
932 }
933
934 fr_event_filter_update(el, fd, FR_EVENT_FILTER_IO, pause_read);
935 @endcode
936 *
937 * @param[in] el to update descriptor in.
938 * @param[in] fd to update filters for.
939 * @param[in] filter The type of filter to update.
940 * @param[in] updates An array of updates to toggle filters on/off without removing
941 * the callback function.
942 */
944 fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
945{
946 fr_event_fd_t *ef;
947 size_t i;
948 fr_event_funcs_t curr_active, curr_stored;
949 struct kevent evset[10];
950 int count = 0;
951
952 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
953 if (unlikely(!ef)) {
954 fr_strerror_printf("No events are registered for fd %i", fd);
955 return -1;
956 }
957
958#ifndef NDEBUG
959 ef->file = file;
960 ef->line = line;
961#endif
962
963 /*
964 * Cheapest way of ensuring this function can error without
965 * leaving everything in an inconsistent state.
966 */
967 memcpy(&curr_active, &ef->active, sizeof(curr_active));
968 memcpy(&curr_stored, &ef->stored, sizeof(curr_stored));
969
970 /*
971 * Apply modifications to our copies of the active/stored array.
972 */
973 for (i = 0; updates[i].op; i++) {
974 switch (updates[i].op) {
975 default:
977 fr_assert(ef->armour == 0); /* can't suspect protected FDs */
978 memcpy((uint8_t *)&ef->stored + updates[i].offset,
979 (uint8_t *)&ef->active + updates[i].offset, sizeof(fr_event_fd_cb_t));
980 memset((uint8_t *)&ef->active + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
981 break;
982
984 memcpy((uint8_t *)&ef->active + updates[i].offset,
985 (uint8_t *)&ef->stored + updates[i].offset, sizeof(fr_event_fd_cb_t));
986 memset((uint8_t *)&ef->stored + updates[i].offset, 0, sizeof(fr_event_fd_cb_t));
987 break;
988 }
989 }
990
991 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset), &ef->active,
992 ef, &ef->active, &curr_active);
993 if (unlikely(count < 0)) {
994 error:
995 memcpy(&ef->active, &curr_active, sizeof(curr_active));
996 memcpy(&ef->stored, &curr_stored, sizeof(curr_stored));
997 return -1;
998 }
999
1000 if (count && unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0)) {
1001 fr_strerror_printf("Failed updating filters for FD %i: %s", ef->fd, fr_syserror(errno));
1002 goto error;
1003 }
1004
1005 return 0;
1006}
1007
1008/** Insert a filter for the specified fd
1009 *
1010 * @param[in] ctx to bind lifetime of the event to.
1011 * @param[out] ef_out Previously allocated ef, or NULL.
1012 * @param[in] el to insert fd callback into.
1013 * @param[in] fd to install filters for.
1014 * @param[in] filter one of the #fr_event_filter_t values.
1015 * @param[in] funcs Structure containing callback functions. If a function pointer
1016 * is set, the equivalent kevent filter will be installed.
1017 * @param[in] error function to call when an error occurs on the fd.
1018 * @param[in] uctx to pass to handler.
1019 */
1021 TALLOC_CTX *ctx, fr_event_fd_t **ef_out,
1022 fr_event_list_t *el, int fd,
1023 fr_event_filter_t filter,
1024 void *funcs, fr_event_error_cb_t error,
1025 void *uctx)
1026{
1027 ssize_t count;
1028 fr_event_fd_t *ef;
1029 fr_event_funcs_t active;
1030 struct kevent evset[10];
1031
1032 if (unlikely(!el)) {
1033 fr_strerror_const("Invalid argument: NULL event list");
1034 return -1;
1035 }
1036
1037 if (unlikely(fd < 0)) {
1038 fr_strerror_printf("Invalid arguments: Bad FD %i", fd);
1039 return -1;
1040 }
1041
1042 if (unlikely(el->exit)) {
1043 fr_strerror_const("Event loop exiting");
1044 return -1;
1045 }
1046
1047 if (!ef_out || !*ef_out) {
1048 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1049 } else {
1050 ef = *ef_out;
1051 fr_assert((fd < 0) || (ef->fd == fd));
1052 }
1053
1054 /*
1055 * Need to free the event to change the talloc link.
1056 *
1057 * This is generally bad. If you hit this
1058 * code path you probably screwed up somewhere.
1059 */
1060 if (unlikely(ef && (ef->linked_ctx != ctx))) TALLOC_FREE(ef);
1061
1062 /*
1063 * No pre-existing event. Allocate an entry
1064 * for insertion into the rbtree.
1065 */
1066 if (!ef) {
1067 ef = talloc_zero(el, fr_event_fd_t);
1068 if (unlikely(!ef)) {
1069 fr_strerror_const("Out of memory");
1070 return -1;
1071 }
1072 talloc_set_destructor(ef, _event_fd_delete);
1073
1074 /*
1075 * Bind the lifetime of the event to the specified
1076 * talloc ctx. If the talloc ctx is freed, the
1077 * event will also be freed.
1078 */
1079 if (ctx != el) talloc_link_ctx(ctx, ef);
1080 ef->linked_ctx = ctx;
1081 ef->el = el;
1082
1083 /*
1084 * Determine what type of file descriptor
1085 * this is.
1086 */
1087 if (fr_event_fd_type_set(ef, fd) < 0) {
1088 free:
1089 talloc_free(ef);
1090 return -1;
1091 }
1092
1093 /*
1094 * Check the filter value is valid
1095 */
1096 if ((filter > (NUM_ELEMENTS(filter_maps) - 1))) {
1097 not_supported:
1098 fr_strerror_printf("Filter %u not supported", filter);
1099 goto free;
1100 }
1101 ef->map = &filter_maps[filter];
1102 if (ef->map->idx_type == FR_EVENT_FUNC_IDX_NONE) goto not_supported;
1103
1104 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
1105 &ef->active, ef, funcs, &ef->active);
1106 if (count < 0) goto free;
1107 if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
1108 fr_strerror_printf("Failed inserting filters for FD %i: %s", fd, fr_syserror(errno));
1109 goto free;
1110 }
1111
1112 ef->filter = filter;
1113 fr_rb_insert(el->fds, ef);
1114 ef->is_registered = true;
1115
1116 /*
1117 * Pre-existing event, update the filters and
1118 * functions associated with the file descriptor.
1119 */
1120 } else {
1121 fr_assert(ef->is_registered == true);
1122
1123 /*
1124 * Take a copy of the current set of active
1125 * functions, so we can error out in a
1126 * consistent state.
1127 */
1128 memcpy(&active, &ef->active, sizeof(ef->active));
1129
1130 fr_assert((ef->armour == 0) || ef->active.io.read);
1131
1132 count = fr_event_build_evset(el, evset, sizeof(evset)/sizeof(*evset),
1133 &ef->active, ef, funcs, &ef->active);
1134 if (count < 0) {
1135 error:
1136 memcpy(&ef->active, &active, sizeof(ef->active));
1137 return -1;
1138 }
1139 if (count && (unlikely(kevent(el->kq, evset, count, NULL, 0, NULL) < 0))) {
1140 fr_strerror_printf("Failed modifying filters for FD %i: %s", fd, fr_syserror(errno));
1141 goto error;
1142 }
1143
1144 /*
1145 * Clear any previously suspended functions
1146 */
1147 memset(&ef->stored, 0, sizeof(ef->stored));
1148 }
1149
1150#ifndef NDEBUG
1151 ef->file = file;
1152 ef->line = line;
1153#endif
1154 ef->error = error;
1155 ef->uctx = uctx;
1156
1157 if (ef_out) *ef_out = ef;
1158
1159 return 0;
1160}
1161
1162/** Associate I/O callbacks with a file descriptor
1163 *
1164 * @param[in] ctx to bind lifetime of the event to.
1165 * @param[out] ef_out Where to store the output event
1166 * @param[in] el to insert fd callback into.
1167 * @param[in] fd to install filters for.
1168 * @param[in] read_fn function to call when fd is readable.
1169 * @param[in] write_fn function to call when fd is writable.
1170 * @param[in] error function to call when an error occurs on the fd.
1171 * @param[in] uctx to pass to handler.
1172 * @return
1173 * - 0 on success.
1174 * - -1 on failure.
1175 */
1177 TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd,
1178 fr_event_fd_cb_t read_fn,
1179 fr_event_fd_cb_t write_fn,
1180 fr_event_error_cb_t error,
1181 void *uctx)
1182{
	/* Package the read/write callbacks into the generic I/O function struct. */
1183 fr_event_io_func_t funcs = { .read = read_fn, .write = write_fn };
1184
	/* At least one of the I/O callbacks must be provided. */
1185 if (unlikely(!read_fn && !write_fn)) {
1186 fr_strerror_const("Invalid arguments: All callbacks are NULL");
1187 return -1;
1188 }
1189
	/* NOTE(review): the line invoking _fr_event_filter_insert() (the return statement
	 * these arguments belong to) appears to have been lost in extraction — confirm
	 * against the upstream source. */
1191 ctx, ef_out, el, fd, FR_EVENT_FILTER_IO, &funcs, error, uctx);
1192}
1193
1194/** Remove a file descriptor from the event loop
1195 *
1196 * @param[in] el to remove file descriptor from.
1197 * @param[in] fd to remove.
1198 * @param[in] filter The type of filter to remove.
1199 * @return
1200 * - 0 if file descriptor was removed.
1201 * - <0 on error.
1202 */
1204{
1205 fr_event_fd_t *ef;
1206
	/* Look up the registered event by its (fd, filter) composite key. */
1207 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1208 if (unlikely(!ef)) {
1209 fr_strerror_printf("No events are registered for fd %d, filter %u", fd, filter);
1210 return -1;
1211 }
1212
1213 /*
1214 * Free will normally fail if it's
1215 * a deferred free. There is a special
1216 * case for kevent failures though.
1217 *
1218 * We distinguish between the two by
1219 * looking to see if the ef is still
1220 * in the event tree.
1221 *
1222 * Talloc returning -1 guarantees the
1223 * memory has not been freed.
1224 */
1225 if ((talloc_free(ef) == -1) && ef->is_registered) return -1;
1226
1227 return 0;
1228}
1229
1230/** Get the opaque event handle from a file descriptor
1231 *
1232 * @param[in] el to search for fd/filter in.
1233 * @param[in] fd to search for.
1234 * @param[in] filter to search for.
1235 * @return
1236 * - NULL if no event could be found.
1237 * - The opaque handle representing an fd event.
1238 */
1240{
1241 fr_event_fd_t *ef;
1242
	/* The rbtree is keyed on the (fd, filter) pair, so build a stack key to search with. */
1243 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1244 if (unlikely(!ef)) {
1245 fr_strerror_printf("No events are registered for fd %i", fd);
1246 return NULL;
1247 }
1248
1249 return ef;
1250}
1251
1252/** Returns the appropriate callback function for a given event
1253 *
1254 * @param[in] ef the event filter fd handle.
1255 * @param[in] kq_filter If the callbacks are indexed by filter.
1256 * @param[in] kq_fflags If the callbacks are indexed by NOTES (fflags).
1257 * @return
1258 * - NULL if no event it associated with the given ef/kq_filter or kq_fflags combo.
1259 * - The callback that would be called if an event with this filter/fflag combo was received.
1260 */
1261fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
1262{
1263 return event_fd_func(ef, &kq_filter, &kq_fflags);
1264}
1265
1266/** Returns the uctx associated with an fr_event_fd_t handle
1267 *
1268 */
1270{
	/* Opaque user context stored when the fd event was inserted. */
1271 return ef->uctx;
1272}
1273
1274#ifndef NDEBUG
1275/** Armour an FD
1276 *
1277 * @param[in] el to remove file descriptor from.
1278 * @param[in] fd to remove.
1279 * @param[in] filter The type of filter to remove.
1280 * @param[in] armour The armour to add.
1281 * @return
1282 * - 0 if file descriptor was armoured
1283 * - <0 on error.
1284 */
1285int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1286{
1287 fr_event_fd_t *ef;
1288
1289 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1290 if (unlikely(!ef)) {
1291 fr_strerror_printf("No events are registered for fd %i", fd);
1292 return -1;
1293 }
1294
1295 if (ef->armour != 0) {
1296 fr_strerror_printf("FD %i is already armoured", fd);
1297 return -1;
1298 }
1299
1300 ef->armour = armour;
1301
1302 return 0;
1303}
1304
1305/** Unarmour an FD
1306 *
1307 * @param[in] el to remove file descriptor from.
1308 * @param[in] fd to remove.
1309 * @param[in] filter The type of filter to remove.
1310 * @param[in] armour The armour to remove
1311 * @return
1312 * - 0 if file descriptor was unarmoured
1313 * - <0 on error.
1314 */
1315int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
1316{
1317 fr_event_fd_t *ef;
1318
1319 ef = fr_rb_find(el->fds, &(fr_event_fd_t){ .fd = fd, .filter = filter });
1320 if (unlikely(!ef)) {
1321 fr_strerror_printf("No events are registered for fd %i", fd);
1322 return -1;
1323 }
1324
1325 fr_assert(ef->armour == armour);
1326
1327 ef->armour = 0;
1328 return 0;
1329}
1330#endif
1331
1332/** Remove PID wait event from kevent if the fr_event_pid_t is freed
1333 *
1334 * @param[in] ev to free.
1335 * @return 0
1336 */
1338{
1339 struct kevent evset;
1340
	/* Clear the owner's handle so it knows the event is gone. */
1341 if (ev->parent) *ev->parent = NULL;
1342 if (!ev->is_registered || (ev->pid < 0)) return 0; /* already deleted from kevent */
1343
1344 EVENT_DEBUG("%p - Disabling event for PID %u - %p was freed", ev->el, (unsigned int)ev->pid, ev);
1345
1346 EV_SET(&evset, ev->pid, EVFILT_PROC, EV_DELETE, NOTE_EXIT, 0, ev);
1347
	/* Best effort — the kevent may already have fired and been removed. */
1348 (void) kevent(ev->el->kq, &evset, 1, NULL, 0, NULL);
1349
1350 return 0;
1351}
1352
1353/** Evaluate a EVFILT_PROC event
1354 *
1355 */
1356CC_NO_UBSAN(function) /* UBSAN: false positive - Public/private version of fr_event_list_t trips -fsanitize=function */
1357static inline CC_HINT(always_inline)
1358void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
1359{
1360 pid_t pid;
1361 fr_event_pid_t *ev;
1362 fr_event_pid_cb_t callback;
1363 void *uctx;
1364
1365 EVENT_DEBUG("%p - PID %u exited with status %i",
1366 el, (unsigned int)kev->ident, (unsigned int)kev->data);
1367
1368 ev = talloc_get_type_abort((void *)kev->udata, fr_event_pid_t);
1369
1370 fr_assert(ev->pid == (pid_t) kev->ident);
1371 fr_assert((kev->fflags & NOTE_EXIT) != 0);
1372
1373 pid = ev->pid;
1374 callback = ev->callback;
1375 uctx = ev->uctx;
1376
1377 ev->is_registered = false; /* so we won't hit kevent again when it's freed */
1378
1379 /*
1380 * Delete the event before calling it.
1381 *
1382 * This also sets the parent pointer
1383 * to NULL, so the thing that started
1384 * monitoring the process knows the
1385 * handle is no longer valid.
1386 *
1387 * EVFILT_PROC NOTE_EXIT events are always
1388 * oneshot no matter what flags we pass,
1389 * so we're just reflecting the state of
1390 * the kqueue.
1391 */
1392 talloc_free(ev);
1393
1394 if (callback) callback(el, pid, (int) kev->data, uctx);
1395}
1396
1397/** Called on the next loop through the event loop when inserting an EVFILT_PROC event fails
1398 *
1399 * This is just a trampoline function which takes the user event and simulates
1400 * an EVFILT_PROC event from it.
1401 *
1402 * @param[in] el That received the event.
1403 * @param[in] uctx An fr_event_pid_t to process.
1404 */
1406{
1407 fr_event_pid_t *ev = talloc_get_type_abort(uctx, fr_event_pid_t);
1408
1409 EVENT_DEBUG("%p - PID %ld exited early, triggered through user event", el, (long)ev->pid);
1410
1411 /*
1412 * Simulate a real struct kevent with the values we
1413 * recorded in fr_event_pid_wait.
1414 */
	/* NOTE: event_pid_eval() frees ev, so ev must not be used after this call. */
1415 event_pid_eval(el, &(struct kevent){ .ident = ev->pid, .data = ev->early_exit.status, .fflags = NOTE_EXIT, .udata = ev });
1416}
1417
1418/** Insert a PID event into an event list
1419 *
1420 * @note The talloc parent of the memory returned in ev_p must not be changed.
1421 * If the lifetime of the event needs to be bound to another context
1422 * this function should be called with the existing event pointed to by
1423 * ev_p.
1424 *
1425 * @param[in] ctx to bind lifetime of the event to.
1426 * @param[in] el to insert event into.
1427 * @param[in,out] ev_p If not NULL modify this event instead of creating a new one. This is a parent
1428 * in a temporal sense, not in a memory structure or dependency sense.
1429 * @param[in] pid child PID to wait for
1430 * @param[in] callback function to execute if the event fires.
1431 * @param[in] uctx user data to pass to the event.
1432 * @return
1433 * - 0 on success.
1434 * - -1 on failure.
1435 */
1437 TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p,
1438 pid_t pid, fr_event_pid_cb_t callback, void *uctx)
1439{
1440 fr_event_pid_t *ev;
1441 struct kevent evset;
1442
1443 ev = talloc(ctx, fr_event_pid_t);
1444 if (unlikely(ev == NULL)) {
1445 fr_strerror_const("Out of memory");
1446 return -1;
1447 }
1448 *ev = (fr_event_pid_t) {
1449 .el = el,
1450 .pid = pid,
1451 .callback = callback,
1452 .uctx = uctx,
1453 .parent = ev_p,
1454#ifndef NDEBUG
1455 .file = file,
1456 .line = line,
1457#endif
1458 };
	/* Destructor removes the EVFILT_PROC watcher and clears *ev_p. */
1459 talloc_set_destructor(ev, _event_pid_free);
1460
1461 /*
1462 * macOS only, on FreeBSD NOTE_EXIT always provides
1463 * the status anyway.
1464 */
1465#ifndef NOTE_EXITSTATUS
1466#define NOTE_EXITSTATUS (0)
1467#endif
1468
1469 EVENT_DEBUG("%p - Adding exit waiter for PID %u", el, (unsigned int)pid);
1470
1471 EV_SET(&evset, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT | NOTE_EXITSTATUS, 0, ev);
1472 ev->is_registered = true;
1473
1474 /*
1475 * This deals with the race where the process exited
1476 * before we could add it to the kqueue.
1477 *
1478 * Unless our caller is broken, the process should
1479 * still be available for reaping, so we check
1480 * waitid to see if there is a pending process and
1481 * then call the callback as kqueue would have done.
1482 */
1483 if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1484 siginfo_t info;
1485 int ret;
1486
1487 /*
1488 * Ensure we don't accidentally pick up the error
1489 * from kevent.
1490 */
	/* NOTE(review): a statement (likely fr_strerror_clear()) appears to have been
	 * lost here in extraction — confirm against upstream. */
1492
1493 ev->is_registered = false;
1494
1495 /*
1496 * If the child exited before kevent() was
1497 * called, we need to get its status via
1498 * waitid().
1499 *
1500 * We don't reap the process here to emulate
1501 * what kqueue does (notify but not reap).
1502 *
1503 * waitid returns >0 on success, 0 if the
1504 * process is still running, and -1 on failure.
1505 *
1506 * If we get a 0, then that's extremely strange
1507 * as adding the kevent failed for a reason
1508 * other than the process already having exited.
1509 *
1510 * On Linux waitid will always return 1 to
1511 * indicate the process exited.
1512 *
1513 * On macOS we seem to get a mix of 1 or 0,
1514 * even if the si_code is one of the values
1515 * we'd consider to indicate that the process
1516 * had completed.
1517 */
1518 ret = waitid(P_PID, pid, &info, WEXITED | WNOHANG | WNOWAIT);
1519 if (ret > 0) {
1520 static fr_table_num_sorted_t const si_codes[] = {
1521 { L("exited"), CLD_EXITED },
1522 { L("killed"), CLD_KILLED },
1523 { L("dumped"), CLD_DUMPED },
1524 { L("trapped"), CLD_TRAPPED },
1525 { L("stopped"), CLD_STOPPED },
1526 { L("continued"), CLD_CONTINUED }
1527 };
	/* NOTE(review): si_codes_len appears unused in the visible code — confirm. */
1528 static size_t si_codes_len = NUM_ELEMENTS(si_codes);
1529
1530 switch (info.si_code) {
1531 case CLD_EXITED:
1532 case CLD_KILLED:
1533 case CLD_DUMPED:
1534 EVENT_DEBUG("%p - PID %ld early exit - code %s (%d), status %d",
1535 el, (long)pid, fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
1536 info.si_code, info.si_status);
1537
1538 /*
1539 * Record the status for later
1540 */
1541 ev->early_exit.status = info.si_status;
1542
1543 /*
1544 * The user event acts as a surrogate for
1545 * an EVFILT_PROC event, and will be evaluated
1546 * during the next loop through the event loop.
1547 *
1548 * It will be automatically deleted when the
1549 * fr_event_pid_t is freed.
1550 *
1551 * Previously we tried to evaluate the proc
1552 * callback here directly, but this lead to
1553 * multiple problems, the biggest being that
1554 * setting requests back to resumable failed
1555 * because they were not yet yielded,
1556 * leading to hangs.
1557 */
1558 early_exit:
1559 if (fr_event_user_insert(ev, el, &ev->early_exit.ev, true, _fr_event_pid_early_exit, ev) < 0) {
1560 fr_strerror_printf_push("Failed adding wait for PID %ld, and failed adding "
1561 "backup user event", (long) pid);
1562 error:
1563 talloc_free(ev);
1564 return -1;
1565 }
1566 break;
1567
1568 default:
1569 fr_strerror_printf("Unexpected code %s (%d) whilst waiting on PID %ld",
1570 fr_table_str_by_value(si_codes, info.si_code, "<UNKOWN>"),
1571 info.si_code, (long) pid);
1572
1573 goto error;
1574 }
1575 /*
1576 * Failed adding waiter for process, but process has not completed...
1577 *
1578 * This is weird, but seems to happen on macOS occasionally.
1579 *
1580 * Add an event to run early exit...
1581 *
1582 * Man pages for waitid say if it returns 0 the info struct can be in
1583 * a nondeterministic state, so there's nothing more to do.
1584 */
1585 } else if (ret == 0) {
1586 goto early_exit;
1587 } else {
1588 /*
1589 * Print this error here, so that the caller gets
1590 * the error from kevent(), and not waitpid().
1591 */
1592 fr_strerror_printf("Failed adding waiter for PID %ld - kevent %s, waitid %s",
1593 (long) pid, fr_syserror(evset.flags), fr_syserror(errno));
1594
1595 goto error;
1596 }
1597 }
1598
1599 /*
1600 * Sometimes the caller doesn't care about getting the
1601 * PID. But we still want to clean it up.
1602 */
1603 if (ev_p) *ev_p = ev;
1604
1605 return 0;
1606}
1607
1608/** Saves some boilerplate...
1609 *
1610 */
1611static inline CC_HINT(always_inline)
1612void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
1613{
1614 if (reap->callback) reap->callback(reap->el, pid, status, reap->uctx);
1615}
1616
1617/** Does the actual reaping of PIDs
1618 *
1619 */
1620static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
1621{
1622 fr_event_pid_reap_t *reap = talloc_get_type_abort(uctx, fr_event_pid_reap_t);
1623
1624 waitpid(pid, &status, WNOHANG); /* Don't block the process if there's a logic error somewhere */
1625
1626 EVENT_DEBUG("%s - Reaper reaped PID %u, status %u - %p", __FUNCTION__, pid, status, reap);
1627
1628 event_list_reap_run_callback(reap, pid, status);
1629
1630 talloc_free(reap);
1631}
1632
1634{
1635 /*
1636 * Clear out the entry in the pid_to_reap
1637 * list if the event was inserted.
1638 */
1639 if (fr_dlist_entry_in_list(&reap->entry)) {
1640 EVENT_DEBUG("%s - Removing entry from pid_to_reap %i - %p", __FUNCTION__,
1641 reap->pid_ev ? reap->pid_ev->pid : -1, reap);
1642 fr_dlist_remove(&reap->el->pid_to_reap, reap);
1643 }
1644
1645 return 0;
1646}
1647
1648/** Asynchronously wait for a PID to exit, then reap it
1649 *
1650 * This is intended to be used when we no longer care about a process
1651 * exiting, but we still want to clean up its state so we don't have
1652 * zombie processes sticking around.
1653 *
1654 * @param[in] el to use to reap the process.
1655 * @param[in] pid to reap.
1656 * @param[in] callback to call when the process is reaped.
1657 * May be NULL.
1658 * @param[in] uctx to pass to callback.
1659 * @return
1660 * - -1 if we couldn't find the process or it has already exited/been reaped.
1661 * - 0 on success (we setup a process handler).
1662 */
1664{
1665 int ret;
1666 fr_event_pid_reap_t *reap;
1667
1668 reap = talloc_zero(NULL, fr_event_pid_reap_t);
1669 if (unlikely(!reap)) {
1670 fr_strerror_const("Out of memory");
1671 return -1;
1672 }
	/* Destructor unlinks the entry from el->pid_to_reap. */
1673 talloc_set_destructor(reap, _fr_event_reap_free);
1674
	/* NOTE(review): the statement assigning ret (likely the fr_event_pid_wait() call
	 * with _fr_event_pid_reap_cb) appears to have been lost in extraction — confirm
	 * against upstream. */
1676 if (ret < 0) {
1677 talloc_free(reap);
1678 return ret;
1679 }
1680
1681 reap->el = el;
1682 reap->callback = callback;
1683 reap->uctx = uctx;
1684
1685 EVENT_DEBUG("%s - Adding reaper for PID %u - %p", __FUNCTION__, pid, reap);
1686
	/* NOTE(review): the statement inserting reap into el->pid_to_reap appears to have
	 * been lost in extraction — confirm against upstream. */
1688
1689 return ret;
1690}
1691
1692/** Send a signal to all the processes we have in our reap list, and reap them
1693 *
1694 * @param[in] el containing the processes to reap.
1695 * @param[in] timeout how long to wait before we signal the processes.
1696 * @param[in] signal to send to processes. Should be a fatal signal.
1697 * @return The number of processes reaped.
1698 */
1700{
	/* NOTE(review): a declaration line (likely the processed-count accumulator)
	 * appears to have been lost here in extraction — confirm against upstream. */
1702 fr_event_pid_reap_t *reap = NULL;
1703
1704 /*
1705 * If we've got a timeout, our best option
1706 * is to use a kqueue instance to monitor
1707 * for process exit.
1708 */
	/* NOTE(review): the enclosing `if` testing the timeout appears to have been lost
	 * in extraction — confirm against upstream. */
1710 int status;
1711 struct kevent evset;
1712 int waiting = 0;
1713 int kq = kqueue();
1714 fr_time_t now, start = el->pub.tl->time(), end = fr_time_add(start, timeout);
1715
1716 if (unlikely(kq < 0)) goto force;
1717
	/* NOTE(review): the loop header iterating `i` over el->pid_to_reap appears to
	 * have been lost in extraction — confirm against upstream. */
1719 if (!i->pid_ev) {
1720 EVENT_DEBUG("%p - %s - Reaper already called (logic error)... - %p",
1721 el, __FUNCTION__, i);
1722
1723 event_list_reap_run_callback(i, -1, SIGKILL);
1724 talloc_free(i);
1725 continue;
1726 }
1727
1728 /*
1729 * See if any processes have exited already
1730 */
1731 if (waitpid(i->pid_ev->pid, &status, WNOHANG) == i->pid_ev->pid) { /* reap */
1732 EVENT_DEBUG("%p - %s - Reaper PID %u already exited - %p",
1733 el, __FUNCTION__, i->pid_ev->pid, i);
1734 event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
1735 talloc_free(i);
1736 continue;
1737 }
1738
1739 /*
1740 * Add the rest to a temporary event loop
1741 */
1742 EV_SET(&evset, i->pid_ev->pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, i);
1743 if (kevent(kq, &evset, 1, NULL, 0, NULL) < 0) {
1744 EVENT_DEBUG("%p - %s - Failed adding reaper PID %u to tmp event loop - %p",
1745 el, __FUNCTION__, i->pid_ev->pid, i);
1746 event_list_reap_run_callback(i, i->pid_ev->pid, SIGKILL);
1747 talloc_free(i);
1748 continue;
1749 }
1750 waiting++;
1751 }}
1752
1753 /*
1754 * Keep draining process exits as they come in...
1755 */
1756 while ((waiting > 0) && fr_time_gt(end, (now = el->pub.tl->time()))) {
1757 struct kevent kev;
1758 int ret;
1759
1760 ret = kevent(kq, NULL, 0, &kev, 1, &fr_time_delta_to_timespec(fr_time_sub(end, now)));
1761 switch (ret) {
1762 default:
1763 EVENT_DEBUG("%p - %s - Reaper tmp loop error %s, forcing process reaping",
1764 el, __FUNCTION__, fr_syserror(errno));
1765 close(kq);
1766 goto force;
1767
1768 case 0:
1769 EVENT_DEBUG("%p - %s - Reaper timeout waiting for process exit, forcing process reaping",
1770 el, __FUNCTION__);
1771 close(kq);
1772 goto force;
1773
1774 case 1:
1775 reap = talloc_get_type_abort(kev.udata, fr_event_pid_reap_t);
1776
1777 EVENT_DEBUG("%p - %s - Reaper reaped PID %u, status %u - %p",
1778 el, __FUNCTION__, (unsigned int)kev.ident, (unsigned int)kev.data, reap);
1779 waitpid(reap->pid_ev->pid, &status, WNOHANG); /* reap */
1780
1781 event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
1782 talloc_free(reap);
1783 break;
1784 }
1785 waiting--;
1786 }
1787
1788 close(kq);
1789 }
1790
1791force:
1792 /*
1793 * Deal with any lingering reap requests
1794 */
1795 while ((reap = fr_dlist_head(&el->pid_to_reap))) {
1796 int status;
1797
1798 EVENT_DEBUG("%s - Reaper forcefully reaping PID %u - %p", __FUNCTION__, reap->pid_ev->pid, reap);
1799
1800 if (kill(reap->pid_ev->pid, signal) < 0) {
1801 /*
1802 * Make sure we don't hang if the
1803 * process has actually exited.
1804 *
1805 * We could check for ESRCH but it's
1806 * not clear if that'd be returned
1807 * for a PID in the unreaped state
1808 * or not...
1809 */
1810 waitpid(reap->pid_ev->pid, &status, WNOHANG);
1811 event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
1812 talloc_free(reap);
1813 continue;
1814 }
1815
1816 /*
1817 * Wait until the child process exits
1818 */
1819 waitpid(reap->pid_ev->pid, &status, 0);
1820 event_list_reap_run_callback(reap, reap->pid_ev->pid, status);
	/* NOTE(review): the statement freeing reap appears to have been lost here in
	 * extraction (without it this loop would not terminate) — confirm against upstream. */
1822 }
1823
	/* NOTE(review): the return statement appears to have been lost here in extraction. */
1825}
1826
1827/** Memory will not be freed if we fail to remove the event from the kqueue
1828 *
1829 * It's easier to debug memory leaks with modern tooling than it is
1830 * to determine why we get random failures and event leaks inside of kqueue.
1831 *
1832 * @return
1833 * - 0 on success.
1834 * - -1 on failure.
1835 */
1837{
	/* Only deregister if the kevent is still live. */
1838 if (ev->is_registered) {
1839 struct kevent evset;
1840
1841 EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, EV_DELETE, 0, 0, 0);
1842
1843 if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1844 fr_strerror_printf("Failed removing user event - kevent %s", fr_syserror(evset.flags));
1845 return -1;
1846 }
1847 ev->is_registered = false;
1848 }
1849
1850 return 0;
1851}
1852
1853static inline CC_HINT(always_inline)
1854void event_user_eval(fr_event_list_t *el, struct kevent *kev)
1855{
1856 fr_event_user_t *ev;
1857
1858 /*
1859 * This is just a "wakeup" event, which
1860 * is always ignored.
1861 */
1862 if (kev->ident == 0) return;
1863
1864 ev = talloc_get_type_abort((void *)kev->ident, fr_event_user_t);
1865 fr_assert((uintptr_t)ev == kev->ident);
1866
1867 ev->callback(el, ev->uctx);
1868}
1869
1870/** Add a user callback to the event list.
1871 *
1872 * @param[in] ctx to allocate the event in.
1873 * @param[in] el Containing the timer events.
1874 * @param[out] ev_p Where to write a pointer.
1875 * @param[in] trigger Whether the user event is triggered initially.
1876 * @param[in] callback for EVFILT_USER.
1877 * @param[in] uctx for the callback.
1878 * @return
1879 * - 0 on success.
1880 * - -1 on error.
1881 */
1883 TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p,
1884 bool trigger, fr_event_user_cb_t callback, void *uctx)
1885{
1886 fr_event_user_t *ev;
1887 struct kevent evset;
1888
1889 ev = talloc(ctx, fr_event_user_t);
1890 if (unlikely(ev == NULL)) {
1891 fr_strerror_const("Out of memory");
1892 return -1;
1893 }
1894 *ev = (fr_event_user_t) {
1895 .el = el,
1896 .callback = callback,
1897 .uctx = uctx,
1898#ifndef NDEBUG
1899 .file = file,
1900 .line = line,
1901#endif
1902 };
1903
	/* The event's address is used as the kevent ident, and NOTE_TRIGGER is set
	 * only when `trigger` is true (bool * flag idiom). */
1904 EV_SET(&evset, (uintptr_t)ev,
1905 EVFILT_USER, EV_ADD | EV_DISPATCH, (trigger * NOTE_TRIGGER), 0, ev);
1906
1907 if (unlikely(kevent(el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1908 fr_strerror_printf("Failed adding user event - kevent %s", fr_syserror(evset.flags));
1909 talloc_free(ev);
1910 return -1;
1911 }
1912 ev->is_registered = true;
	/* Destructor removes the kevent registration when the event is freed. */
1913 talloc_set_destructor(ev, _event_user_delete);
1914
1915 if (ev_p) *ev_p = ev;
1916
1917 return 0;
1918}
1919
1920/** Trigger a user event
1921 *
1922 * @param[in] ev Handle for the user event.
1923 * @return
1924 * - 0 on success.
1925 * - -1 on error.
1926 */
1928{
1929 struct kevent evset;
1930
	/* The ident is the event's address, matching how it was registered. */
1931 EV_SET(&evset, (uintptr_t)ev, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
1932
1933 if (unlikely(kevent(ev->el->kq, &evset, 1, NULL, 0, NULL) < 0)) {
1934 fr_strerror_printf("Failed triggering user event - kevent %s", fr_syserror(evset.flags));
1935 return -1;
1936 }
1937
1938 return 0;
1939}
1940
1941/** Add a pre-event callback to the event list.
1942 *
1943 * Events are serviced in insert order. i.e. insert A, B, we then
1944 * have A running before B.
1945 *
1946 * @param[in] el Containing the timer events.
1947 * @param[in] callback The pre-processing callback.
1948 * @param[in] uctx for the callback.
1949 * @return
1950 * - < 0 on error
1951 * - 0 on success
1952 */
1954{
1955 fr_event_pre_t *pre;
1956
	/* NOTE(review): the talloc() return is not checked here, unlike other insert
	 * functions in this file — confirm whether that's intentional. */
1957 pre = talloc(el, fr_event_pre_t);
1958 pre->callback = callback;
1959 pre->uctx = uctx;
1960
	/* NOTE(review): the statement inserting pre into el->pre_callbacks appears to
	 * have been lost in extraction — confirm against upstream. */
1962
1963 return 0;
1964}
1965
1966/** Delete a pre-event callback from the event list.
1967 *
1968 * @param[in] el Containing the timer events.
1969 * @param[in] callback The pre-processing callback.
1970 * @param[in] uctx for the callback.
1971 * @return
1972 * - < 0 on error
1973 * - 0 on success
1974 */
1976{
1977 fr_event_pre_t *pre, *next;
1978
	/* Next pointer cached before any removal so iteration stays safe. */
1979 for (pre = fr_dlist_head(&el->pre_callbacks);
1980 pre != NULL;
1981 pre = next) {
1982 next = fr_dlist_next(&el->pre_callbacks, pre);
1983
	/* Match on both callback and uctx; only the first match is deleted. */
1984 if ((pre->callback == callback) &&
1985 (pre->uctx == uctx)) {
	/* NOTE(review): the statement removing pre from the dlist appears to have been
	 * lost in extraction — confirm against upstream. */
1987 talloc_free(pre);
1988 return 0;
1989 }
1990 }
1991
1992 return -1;
1993}
1994
1995/** Add a post-event callback to the event list.
1996 *
1997 * Events are serviced in insert order. i.e. insert A, B, we then
1998 * have A running before B.
1999 *
2000 * @param[in] el Containing the timer events.
2001 * @param[in] callback The post-processing callback.
2002 * @param[in] uctx for the callback.
2003 * @return
2004 * - < 0 on error
2005 * - 0 on success
2006 */
2008{
2009 fr_event_post_t *post;
2010
	/* NOTE(review): the talloc() return is not checked here, unlike other insert
	 * functions in this file — confirm whether that's intentional. */
2011 post = talloc(el, fr_event_post_t);
2012 post->callback = callback;
2013 post->uctx = uctx;
2014
	/* NOTE(review): the statement inserting post into el->post_callbacks appears to
	 * have been lost in extraction — confirm against upstream. */
2016
2017 return 0;
2018}
2019
2020/** Delete a post-event callback from the event list.
2021 *
2022 * @param[in] el Containing the timer events.
2023 * @param[in] callback The post-processing callback.
2024 * @param[in] uctx for the callback.
2025 * @return
2026 * - < 0 on error
2027 * - 0 on success
2028 */
2030{
2031 fr_event_post_t *post, *next;
2032
	/* Next pointer cached before any removal so iteration stays safe. */
2033 for (post = fr_dlist_head(&el->post_callbacks);
2034 post != NULL;
2035 post = next) {
2036 next = fr_dlist_next(&el->post_callbacks, post);
2037
	/* Match on both callback and uctx; only the first match is deleted. */
2038 if ((post->callback == callback) &&
2039 (post->uctx == uctx)) {
	/* NOTE(review): the statement removing post from the dlist appears to have been
	 * lost in extraction — confirm against upstream. */
2041 talloc_free(post);
2042 return 0;
2043 }
2044 }
2045
2046 return -1;
2047}
2048
2049/** Gather outstanding timer and file descriptor events
2050 *
2051 * @param[in] el to process events for.
2052 * @param[in] now The current time.
2053 * @param[in] wait if true, block on the kevent() call until a timer or file descriptor event occurs.
2054 * @return
2055 * - <0 error, or the event loop is exiting
2056 * - the number of outstanding I/O events, +1 if at least one timer will fire.
2057 */
2059{
2060 fr_time_delta_t when, *wake;
2061 struct timespec ts_when, *ts_wake;
2062 fr_event_pre_t *pre;
2063 int num_fd_events;
2064 bool timer_event_ready = false;
2065 fr_time_t next;
2066
2067 el->num_fd_events = 0;
2068
	/* An exit request takes effect on the next corral, not mid-service. */
2069 if (el->will_exit || el->exit) {
2070 el->exit = el->will_exit;
2071
2072 fr_strerror_const("Event loop exiting");
2073 return -1;
2074 }
2075
2076 /*
2077 * By default we wait for 0ns, which means returning
2078 * immediately from kevent().
2079 */
2080 when = fr_time_delta_wrap(0);
2081 wake = &when;
2082
2083 /*
2084 * See when we have to wake up. Either now, if the timer
2085 * events are in the past. Or, we wait for a future
2086 * timer event.
2087 */
2088 next = fr_timer_list_when(el->pub.tl);
2089 if (fr_time_neq(next, fr_time_wrap(0))) {
2090 if (fr_time_lteq(next, now)) {
2091 timer_event_ready = true;
2092
2093 } else if (wait) {
2094 when = fr_time_sub(next, now);
2095
2096 } /* else we're not waiting, leave "when == 0" */
2097
2098 } else if (wait) {
2099 /*
2100 * We're asked to wait, but there's no timer
2101 * event. We can then sleep forever.
2102 */
2103 wake = NULL;
2104 }
2105
2106 /*
2107 * Run the status callbacks. It may tell us that the
2108 * application has more work to do, in which case we
2109 * re-set the timeout to be instant.
2110 *
2111 * We only run these callbacks if the caller is otherwise
2112 * idle.
2113 */
2114 if (wait) {
2115 for (pre = fr_dlist_head(&el->pre_callbacks);
2116 pre != NULL;
2117 pre = fr_dlist_next(&el->pre_callbacks, pre)) {
2118 if (pre->callback(now, wake ? *wake : fr_time_delta_wrap(0), pre->uctx) > 0) {
2119 wake = &when;
2120 when = fr_time_delta_wrap(0);
2121 }
2122 }
2123 }
2124
2125 /*
2126 * Wake is the delta between el->now
2127 * (the event loops view of the current time)
2128 * and when the event should occur.
2129 */
2130 if (wake) {
2131 ts_when = fr_time_delta_to_timespec(when);
2132 ts_wake = &ts_when;
2133 } else {
	/* NULL timeout means kevent() blocks indefinitely. */
2134 ts_wake = NULL;
2135 }
2136
2137 /*
2138 * Populate el->events with the list of I/O events
2139 * that occurred since this function was last called
2140 * or wait for the next timer event.
2141 */
2142 num_fd_events = kevent(el->kq, NULL, 0, el->events, FR_EV_BATCH_FDS, ts_wake);
2143
2144 /*
2145 * Interrupt is different from timeout / FD events.
2146 */
2147 if (unlikely(num_fd_events < 0)) {
2148 if (errno == EINTR) {
2149 return 0;
2150 } else {
2151 fr_strerror_printf("Failed calling kevent: %s", fr_syserror(errno));
2152 return -1;
2153 }
2154 }
2155
2156 el->num_fd_events = num_fd_events;
2157
2158 EVENT_DEBUG("%p - %s - kevent returned %u FD events", el, __FUNCTION__, el->num_fd_events);
2159
2160 /*
2161 * If there are no FD events, we must have woken up from a timer
2162 */
2163 if (!num_fd_events) {
2164 if (wait) timer_event_ready = true;
2165 }
2166 /*
2167 * The caller doesn't really care what the value of the
2168 * return code is. Just that it's greater than zero if
2169 * events needs servicing.
2170 *
2171 * num_fd_events > 0 - if kevent() returns FD events
2172 * timer_event_ready > 0 - if there were timers ready BEFORE or AFTER calling kevent()
2173 */
2174 return num_fd_events + timer_event_ready;
2175}
2176
2177CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private fr_event_list_t trips --fsanitize=function*/
2178static inline CC_HINT(always_inline)
2179void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
2180{
2181 fr_event_fd_cb_t fd_cb;
2182
2183 while ((fd_cb = event_fd_func(ef, filter, fflags))) {
2184 fd_cb(el, ef->fd, flags, ef->uctx);
2185 }
2186}
2187
2188/** Service any outstanding timer or file descriptor events
2189 *
2190 * @param[in] el containing events to service.
2191 */
CC_NO_UBSAN(function) /* UBSAN: false positive - Public/private version of fr_event_list_t trips -fsanitize=function */
{
	fr_timer_list_t *etl = el->pub.tl;
	int i;
	fr_event_post_t *post;
	fr_time_t when, now;

	/* A catastrophic error in a previous pass set el->exit - do nothing */
	if (unlikely(el->exit)) return;

	EVENT_DEBUG("%p - %s - Servicing %u FD events", el, __FUNCTION__, el->num_fd_events);

	/*
	 *	Run all of the file descriptor events.
	 *
	 *	in_handler defers the actual free of any fr_event_fd_t
	 *	deleted by a callback until after this loop (see the
	 *	fd_to_free drain below), so stale entries in el->events
	 *	can be skipped safely.
	 */
	el->in_handler = true;
	for (i = 0; i < el->num_fd_events; i++) {
		/*
		 *	Process any user events
		 */
		switch (el->events[i].filter) {
		case EVFILT_USER:
			event_user_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process proc events
		 */
		case EVFILT_PROC:
			event_pid_eval(el, &el->events[i]);
			continue;

		/*
		 *	Process various types of file descriptor events
		 */
		default:
		{
			fr_event_fd_t *ef = talloc_get_type_abort(el->events[i].udata, fr_event_fd_t);
			int fd_errno = 0;

			/* Local copies - event_callback() may modify filter/fflags */
			int fflags = el->events[i].fflags; /* mutable */
			int filter = el->events[i].filter;
			int flags = el->events[i].flags;

			if (!ef->is_registered) continue; /* Was deleted between corral and service */

			if (unlikely(flags & EV_ERROR)) {
				fd_errno = el->events[i].data;
			ev_error:
				/*
				 *	Call the error handler, but only if the socket hasn't been deleted at EOF
				 *	below.
				 */
				if (ef->is_registered && ef->error) ef->error(el, ef->fd, flags, fd_errno, ef->uctx);
				TALLOC_FREE(ef);
				continue;
			}

			/*
			 *	EOF can indicate we've actually reached
			 *	the end of a file, but for sockets it usually
			 *	indicates the other end of the connection
			 *	has gone away.
			 */
			if (flags & EV_EOF) {
				/*
				 *	This is fine, the callback will get notified
				 *	via the flags field.
				 */
				if (ef->type == FR_EVENT_FD_FILE) goto service;
#if defined(__linux__) && defined(SO_GET_FILTER)
				/*
				 *	There seems to be an issue with the
				 *	ioctl(...SIOCNQ...) call libkqueue
				 *	uses to determine the number of bytes
				 *	readable.  When ioctl returns, the number
				 *	of bytes available is set to zero, which
				 *	libkqueue interprets as EOF.
				 *
				 *	As a workaround, if we're not reading
				 *	a file, and are operating on a raw socket
				 *	with a packet filter attached, we ignore
				 *	the EOF flag and continue.
				 */
				if ((ef->sock_type == SOCK_RAW) && (ef->type == FR_EVENT_FD_PCAP)) goto service;
#endif

				/*
				 *	If we see an EV_EOF flag that means the
				 *	read side of the socket has been closed
				 *	but there may still be pending data.
				 *
				 *	Dispatch the read event and then error.
				 */
				if ((el->events[i].filter == EVFILT_READ) && (el->events[i].data > 0)) {
					event_callback(el, ef, &filter, flags, &fflags);
				}

				fd_errno = el->events[i].fflags;

				goto ev_error;
			}

		service:
#ifndef NDEBUG
			EVENT_DEBUG("Running event for fd %d, from %s[%d]", ef->fd, ef->file, ef->line);
#endif

			/*
			 *	Service the event_fd events
			 */
			event_callback(el, ef, &filter, flags, &fflags);
		}
		}
	}

	/*
	 *	Process any deferred frees performed
	 *	by the I/O handlers.
	 *
	 *	The events are removed from the FD rbtree
	 *	and kevent immediately, but frees are
	 *	deferred to allow stale events to be
	 *	skipped sans SEGV.
	 */
	el->in_handler = false;	/* Allow events to be deleted */
	{
		fr_event_fd_t *ef;

		/* talloc_free removes ef from fd_to_free via its destructor */
		while ((ef = fr_dlist_head(&el->fd_to_free))) talloc_free(ef);
	}

	/*
	 *	We must call el->time() again here, else the event
	 *	list's time gets updated too infrequently, and we
	 *	can end up with a situation where timers are
	 *	serviced much later than they should be, which can
	 *	cause strange interaction effects, spurious calls
	 *	to kevent, and busy loops.
	 */
	now = etl->time();

	/*
	 *	Run all of the timer events.  Note that these can add
	 *	new timers!
	 */
		int ret;

		when = now;

		ret = fr_timer_list_run(etl, &when);
		if (!fr_cond_assert(ret >= 0)) { /* catastrophic error, trigger event loop exit */
			el->exit = 1;
			return;
		}

		EVENT_DEBUG("%p - %s - Serviced %u timer(s)", el, __FUNCTION__, (unsigned int)ret);
	}

	/* Refresh the clock again - timer callbacks may have taken a while */
	now = etl->time();

	/*
	 *	Run all of the post-processing events.
	 */
	for (post = fr_dlist_head(&el->post_callbacks);
	     post != NULL;
	     post = fr_dlist_next(&el->post_callbacks, post)) {
		post->callback(el, now, post->uctx);
	}
}
2363
2364/** Signal an event loop exit with the specified code
2365 *
2366 * The event loop will complete its current iteration, and then exit with the specified code.
2367 *
2368 * @param[in] el to signal to exit.
2369 * @param[in] code for #fr_event_loop to return.
2370 */
{
	if (unlikely(!el)) return;

	/*
	 *	Doesn't stop the loop immediately - will_exit is
	 *	picked up on the next call to fr_event_corral.
	 */
	el->will_exit = code;
}
2377
2378/** Check to see whether the event loop is in the process of exiting
2379 *
2380 * @param[in] el to check.
2381 */
{
	/* True if an exit has been signalled (will_exit) or is already in progress (exit) */
	return ((el->will_exit != 0) || (el->exit != 0));
}
2386
2387/** Run an event loop
2388 *
2389 * @note Will not return until #fr_event_loop_exit is called.
2390 *
2391 * @param[in] el to start processing.
2392 */
CC_HINT(flatten) int fr_event_loop(fr_event_list_t *el)
{
	/* Clear any exit state left over from a previous run */
	el->will_exit = el->exit = 0;

	el->dispatch = true;
	while (!el->exit) {
		/*
		 *	NOTE(review): the "< 0" sits outside the unlikely()
		 *	parens, so the branch hint covers only the return
		 *	value, not the comparison.  Harmless, as unlikely()
		 *	yields its argument unchanged, but probably not what
		 *	was intended - confirm and move the comparison inside.
		 */
		if (unlikely(fr_event_corral(el, el->pub.tl->time(), true)) < 0) break;
	}

	/*
	 *	Give processes five seconds to exit.
	 *	This means any triggers that we may
	 *	have issued when the server exited
	 *	have a chance to complete.
	 */
	el->dispatch = false;

	return el->exit;
}
2414
2415/** Cleanup an event list
2416 *
2417 * Frees/destroys any resources associated with an event list
2418 *
2419 * @param[in] el to free resources for.
2420 */
{
	/*
	 *	Free children (FD events, timers, user events...) while
	 *	el->kq is still open - presumably so their destructors can
	 *	still deregister from the kqueue.  TODO confirm ordering
	 *	requirement.
	 */
	talloc_free_children(el);

	if (el->kq >= 0) close(el->kq);	/* kq is -1 until kqueue() succeeds in fr_event_list_alloc */

	return 0;
}
2431
2432/** Free any memory we allocated for indexes
2433 *
2434 */
2435static int _event_free_indexes(UNUSED void *uctx)
2436{
2437 unsigned int i;
2438
2439 for (i = 0; i < NUM_ELEMENTS(filter_maps); i++) if (talloc_free(filter_maps[i].ev_to_func) < 0) return -1;
2440 return 0;
2441}
2442
/* Build the event-to-function indexes for each entry in filter_maps.
 * Registered via fr_atexit_global_once_ret() the first time
 * fr_event_list_alloc() is called; paired with _event_free_indexes.
 */
static int _event_build_indexes(UNUSED void *uctx)
{
	unsigned int i;

	return 0;
}
2450
2451#ifdef EVFILT_LIBKQUEUE
2452/** kqueue logging wrapper function
2453 *
2454 */
2455static CC_HINT(format (printf, 1, 2)) CC_HINT(nonnull)
2456void _event_kqueue_log(char const *fmt, ...)
2457{
2458 va_list ap;
2459
2460 va_start(ap, fmt);
2461 fr_vlog(&default_log, L_DBG, __FILE__, __LINE__, fmt, ap);
2462 va_end(ap);
2463}
2464
/** If we're building with libkqueue, and debugging is enabled, enable libkqueue debugging output
 *
 * Registers _event_kqueue_log as libkqueue's log function, and enables
 * libkqueue's own debug output when fr_debug_lvl >= L_DBG_LVL_3.
 * NOTE(review): an earlier version of this comment said "debug level 4
 * or higher" but the code checks L_DBG_LVL_3 - confirm which is intended.
 *
 * This requires a debug build of libkqueue
 */
static int _event_kqueue_logging(UNUSED void *uctx)
{
	struct kevent kev, receipt;

	/*
	 *	Dedicated kqueue used purely to push logging configuration
	 *	into libkqueue via the libkqueue-specific EVFILT_LIBKQUEUE
	 *	filter.  Torn down in _event_kqueue_logging_stop.
	 */
	log_conf_kq = kqueue();
	if (unlikely(log_conf_kq < 0)) {
		fr_strerror_const("Failed initialising logging configuration kqueue");
		return -1;
	}

	/*
	 *	Install our log wrapper as libkqueue's debug log function.
	 *
	 *	On failure we return 1 rather than -1 and set no error
	 *	string - presumably a deliberate soft failure (e.g. when
	 *	linked against a non-debug libkqueue).  TODO confirm.
	 */
	EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, (intptr_t)_event_kqueue_log, NULL);
	if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
		close(log_conf_kq);
		log_conf_kq = -1;
		return 1;
	}

	/* Turn on libkqueue's internal debug output at high debug levels */
	if (fr_debug_lvl >= L_DBG_LVL_3) {
		EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG, 1, NULL);
		if (kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){}) != 1) {
			fr_strerror_const("Failed enabling libkqueue debug logging");
			close(log_conf_kq);
			log_conf_kq = -1;
			return -1;
		}
	}

	return 0;
}
2498
2499static int _event_kqueue_logging_stop(UNUSED void *uctx)
2500{
2501 struct kevent kev, receipt;
2502
2503 EV_SET(&kev, 0, EVFILT_LIBKQUEUE, EV_ADD, NOTE_DEBUG_FUNC, 0, NULL);
2504 (void)kevent(log_conf_kq, &kev, 1, &receipt, 1, &(struct timespec){});
2505
2506 close(log_conf_kq);
2507 log_conf_kq = -1;
2508
2509 return 0;
2510}
2511#endif
2512
2513/** Initialise a new event list
2514 *
2515 * @param[in] ctx to allocate memory in.
2516 * @param[in] status callback, called on each iteration of the event list.
2517 * @param[in] status_uctx context for the status callback
2518 * @return
2519 * - A pointer to a new event list on success (free with talloc_free).
2520 * - NULL on error.
2521 */
fr_event_list_t *fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
{
	struct kevent kev;
	int ret;

	/*
	 *	Build the map indexes the first time this
	 *	function is called.
	 */
	fr_atexit_global_once_ret(&ret, _event_build_indexes, _event_free_indexes, NULL);
#ifdef EVFILT_LIBKQUEUE
	/* Push logging configuration into libkqueue (torn down at process exit) */
	fr_atexit_global_once_ret(&ret, _event_kqueue_logging, _event_kqueue_logging_stop, NULL);
#endif

	el = talloc_zero(ctx, fr_event_list_t);
	if (!fr_cond_assert(el)) {
		fr_strerror_const("Out of memory");
		return NULL;
	}
	el->kq = -1; /* So destructor can be used before kqueue() provides us with fd */
	talloc_set_destructor(el, _event_list_free);

	if (!el->pub.tl) {
		fr_strerror_const("Failed allocating timer list");
	error:
		/* Destructor handles partial initialisation (kq == -1 check) */
		talloc_free(el);
		return NULL;
	}

	if (!el->fds) {
		fr_strerror_const("Failed allocating FD tree");
		goto error;
	}

	el->kq = kqueue();
	if (el->kq < 0) {
		fr_strerror_printf("Failed allocating kqueue: %s", fr_syserror(errno));
		goto error;
	}

	/* Optional per-iteration status callback, run before each corral */
	if (status) (void) fr_event_pre_insert(el, status, status_uctx);

	/*
	 *	Set our "exit" callback as ident 0.
	 */
	EV_SET(&kev, 0, EVFILT_USER, EV_ADD | EV_CLEAR, NOTE_FFNOP, 0, NULL);
	if (kevent(el->kq, &kev, 1, NULL, 0, NULL) < 0) {
		fr_strerror_printf("Failed adding exit callback to kqueue: %s", fr_syserror(errno));
		goto error;
	}

	return el;
}
2582
2583/** Return whether the event loop has any active events
2584 *
2585 */
2590#ifdef TESTING
2591/*
2592 * cc -g -I .. -c rb.c -o rbtree.o && cc -g -I .. -c isaac.c -o isaac.o && cc -DTESTING -I .. -c event.c -o event_mine.o && cc event_mine.o rbtree.o isaac.o -o event
2593 *
2594 * ./event
2595 *
2596 * And hit CTRL-S to stop the output, CTRL-Q to continue.
2597 * It normally alternates printing the time and sleeping,
2598 * but when you hit CTRL-S/CTRL-Q, you should see a number
2599 * of events run right after each other.
2600 *
2601 * OR
2602 *
2603 * valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./event
2604 */
2605
2606static void print_time(void *ctx)
2607{
2608 fr_time_t when;
2609 int64_t usec;
2610
2611 when = *(fr_time_t *) ctx;
2612 usec = fr_time_to_usec(when);
2613
2614 printf("%d.%06d\n", usec / USEC, usec % USEC);
2615 fflush(stdout);
2616}
2617
2618static fr_randctx rand_pool;
2619
2620static uint32_t event_rand(void)
2621{
2622 uint32_t num;
2623
2624 num = rand_pool.randrsl[rand_pool.randcnt++];
2625 if (rand_pool.randcnt == 256) {
2626 fr_isaac(&rand_pool);
2627 rand_pool.randcnt = 0;
2628 }
2629
2630 return num;
2631}
2632
2633
2634#define MAX 100
/* TESTING harness - schedules MAX randomly-spaced timers and services
 * them, printing each fire time (see usage comment above).
 *
 * NOTE(review): this harness appears bitrotted relative to the current
 * APIs and likely no longer compiles - see inline notes.  Verify before
 * relying on it.
 */
int main(int argc, char **argv)
{
	int i, rcode;
	fr_time_t array[MAX];
	fr_time_t now, when;

	/*
	 *	NOTE(review): fr_event_list_alloc() takes three arguments
	 *	(ctx, status, status_uctx) but is called with two here -
	 *	presumably should be fr_event_list_alloc(NULL, NULL, NULL).
	 */
	el = fr_event_list_alloc(NULL, NULL);
	if (!el) fr_exit_now(1);

	memset(&rand_pool, 0, sizeof(rand_pool));
	rand_pool.randrsl[1] = time(NULL);

	fr_rand_init(&rand_pool, 1);
	rand_pool.randcnt = 0;

	array[0] = el->time();
	for (i = 1; i < MAX; i++) {
		array[i] = array[i - 1];
		array[i] += event_rand() & 0xffff;

		/*
		 *	NOTE(review): print_time() dereferences its uctx as
		 *	an fr_time_t pointer, but array[i] is passed by
		 *	value here - this looks like it should be &array[i].
		 */
		fr_timer_at(NULL, el, array[i], false, print_time, array[i]);
	}

	while (fr_event_list_num_timers(el)) {
		now = el->time();
		when = now;
		if (!fr_timer_run(el, &when)) {
			int delay = (when - now) / 1000; /* nanoseconds to microseconds */

			printf("\tsleep %d microseconds\n", delay);
			fflush(stdout);
			usleep(delay);
		}
	}

	talloc_free(el);

	return 0;
}
2675#endif
int const char * file
Definition acutest.h:702
va_end(args)
static int const char * fmt
Definition acutest.h:573
int const char int line
Definition acutest.h:702
va_start(args, fmt)
#define RCSID(id)
Definition build.h:485
#define DIAG_UNKNOWN_PRAGMAS
Definition build.h:458
#define L(_str)
Helper for initialising arrays of string literals.
Definition build.h:209
#define typeof_field(_type, _field)
Typeof field.
Definition build.h:174
#define DIAG_ON(_x)
Definition build.h:460
#define CC_NO_UBSAN(_sanitize)
Definition build.h:428
#define CMP_RETURN(_a, _b, _field)
Return if the comparison is not 0 (is unequal)
Definition build.h:121
#define CMP(_a, _b)
Same as CMP_PREFER_SMALLER use when you don't really care about ordering, you just want an ordering.
Definition build.h:112
#define unlikely(_x)
Definition build.h:383
#define NDEBUG_LOCATION_VALS
Definition build.h:264
#define NDEBUG_LOCATION_ARGS
Pass caller information to the function.
Definition build.h:263
#define UNUSED
Definition build.h:317
#define NUM_ELEMENTS(_t)
Definition build.h:339
#define DIAG_OFF(_x)
Definition build.h:459
#define fr_cond_assert(_x)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition debug.h:131
#define fr_assert_fail(_msg,...)
Calls panic_action ifndef NDEBUG, else logs error.
Definition debug.h:208
#define fr_cond_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition debug.h:148
#define fr_exit_now(_x)
Exit without calling atexit() handlers, producing a log message in debug builds.
Definition debug.h:226
int main(int argc, char **argv)
Definition dhcpclient.c:531
static void * fr_dlist_head(fr_dlist_head_t const *list_head)
Return the HEAD item of a list or NULL if the list is empty.
Definition dlist.h:486
static void * fr_dlist_remove(fr_dlist_head_t *list_head, void *ptr)
Remove an item from the list.
Definition dlist.h:638
static bool fr_dlist_entry_in_list(fr_dlist_t const *entry)
Check if a list entry is part of a list.
Definition dlist.h:163
static unsigned int fr_dlist_num_elements(fr_dlist_head_t const *head)
Return the number of elements in the dlist.
Definition dlist.h:939
static int fr_dlist_insert_tail(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the tail of a list.
Definition dlist.h:378
#define fr_dlist_talloc_init(_head, _type, _field)
Initialise the head structure of a doubly linked list.
Definition dlist.h:275
#define fr_dlist_foreach_safe(_list_head, _type, _iter)
Iterate over the contents of a list allowing for removals.
Definition dlist.h:108
static void * fr_dlist_next(fr_dlist_head_t const *list_head, void const *ptr)
Get the next item in a list.
Definition dlist.h:555
Head of a doubly linked list.
Definition dlist.h:51
Entry in a doubly linked list.
Definition dlist.h:41
fr_event_io_func_t io
Read/write functions.
Definition event.h:215
struct fr_event_user_s fr_event_user_t
An opaque user event handle.
Definition event.h:79
void(* fr_event_fd_cb_t)(fr_event_list_t *el, int fd, int flags, void *uctx)
Called when an IO event occurs on a file descriptor.
Definition event.h:151
@ FR_EVENT_OP_SUSPEND
Temporarily remove the relevant filter from kevent.
Definition event.h:91
@ FR_EVENT_OP_RESUME
Reinsert the filter into kevent.
Definition event.h:92
fr_event_filter_t
The type of filter to install for an FD.
Definition event.h:83
@ FR_EVENT_FILTER_VNODE
Filter for vnode subfilters.
Definition event.h:85
@ FR_EVENT_FILTER_IO
Combined filter for read/write functions/.
Definition event.h:84
size_t offset
Offset of function in func struct.
Definition event.h:98
fr_timer_list_t * tl
The timer list associated with this event loop.
Definition event.h:47
struct fr_event_pid fr_event_pid_t
An opaque PID status handle.
Definition event.h:75
fr_event_fd_cb_t read
Callback for when data is available.
Definition event.h:190
void(* fr_event_pid_cb_t)(fr_event_list_t *el, pid_t pid, int status, void *uctx)
Called when a child process has exited.
Definition event.h:170
void(* fr_event_error_cb_t)(fr_event_list_t *el, int fd, int flags, int fd_errno, void *uctx)
Called when an IO error event occurs on a file descriptor.
Definition event.h:161
int(* fr_event_status_cb_t)(fr_time_t now, fr_time_delta_t wake, void *uctx)
Called after each event loop cycle.
Definition event.h:142
void(* fr_event_post_cb_t)(fr_event_list_t *el, fr_time_t now, void *uctx)
Called when a post event fires.
Definition event.h:185
fr_event_op_t op
Operation to perform on function/filter.
Definition event.h:99
void(* fr_event_user_cb_t)(fr_event_list_t *el, void *uctx)
Called when a user kevent occurs.
Definition event.h:177
#define EVENT_DEBUG(...)
Definition event.h:66
#define fr_event_user_insert(_ctx, _el, _ev_p, _trigger, _callback, _uctx)
Definition event.h:280
Callbacks for the FR_EVENT_FILTER_IO filter.
Definition event.h:189
Public event list structure.
Definition event.h:46
Structure describing a modification to a filter's state.
Definition event.h:97
Callbacks for the FR_EVENT_FILTER_VNODE filter.
Definition event.h:196
Union of all filter functions.
Definition event.h:214
free(array)
void fr_isaac(fr_randctx *ctx)
Definition isaac.c:46
fr_dlist_head_t pre_callbacks
callbacks when we may be idle...
Definition event.c:392
void fr_event_service(fr_event_list_t *el)
Service any outstanding timer or file descriptor events.
Definition event.c:2193
fr_dlist_head_t post_callbacks
post-processing callbacks
Definition event.c:393
static fr_event_func_map_t filter_maps[]
Definition event.c:138
static int fr_event_fd_type_set(fr_event_fd_t *ef, int fd)
Discover the type of a file descriptor.
Definition event.c:754
fr_event_func_map_entry_t * func_to_ev
Function -> Event maps coalesced, out of order.
Definition event.c:134
fr_event_error_cb_t error
Callback for when an error occurs on the FD.
Definition event.c:276
char const * file
Source file this event was last updated in.
Definition event.c:318
static int8_t fr_event_fd_cmp(void const *one, void const *two)
Compare two file descriptor handles.
Definition event.c:536
fr_event_pid_cb_t callback
callback to run when the child exits
Definition event.c:334
fr_event_funcs_t stored
Stored (set, but inactive) filter functions.
Definition event.c:274
static ssize_t fr_event_build_evset(UNUSED fr_event_list_t *el, struct kevent out_kev[], size_t outlen, fr_event_funcs_t *active, fr_event_fd_t *ef, fr_event_funcs_t const *new, fr_event_funcs_t const *prev)
Build a new evset based on function pointers present.
Definition event.c:621
fr_rb_tree_t * fds
Tree used to track FDs with filters in kqueue.
Definition event.c:379
bool is_registered
Whether this fr_event_fd_t's FD has been registered with kevent.
Definition event.c:280
char const * file
Source file this event was last updated in.
Definition event.c:293
fr_time_t fr_event_list_time(fr_event_list_t *el)
Get the current server time according to the event list.
Definition event.c:590
int fr_event_pre_delete(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Delete a pre-event callback from the event list.
Definition event.c:1975
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:299
static void event_list_reap_run_callback(fr_event_pid_reap_t *reap, pid_t pid, int status)
Saves some boilerplate...
Definition event.c:1612
int line
Line this event was last updated on.
Definition event.c:352
static int _event_fd_delete(fr_event_fd_t *ef)
Remove a file descriptor from the event loop and rbtree but don't explicitly free it.
Definition event.c:811
int _fr_event_pid_reap(NDEBUG_LOCATION_ARGS fr_event_list_t *el, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Asynchronously wait for a PID to exit, then reap it.
Definition event.c:1663
fr_event_filter_t filter
Definition event.c:266
#define FR_EVENT_FD_PCAP
Definition event.c:111
void * uctx
Context pointer to pass to each file descriptor callback.
Definition event.c:335
fr_event_status_cb_t callback
The callback to call.
Definition event.c:361
static void _fr_event_pid_reap_cb(UNUSED fr_event_list_t *el, pid_t pid, int status, void *uctx)
Does the actual reaping of PIDs.
Definition event.c:1620
int line
Line this event was last updated on.
Definition event.c:319
static size_t kevent_filter_table_len
Definition event.c:84
struct fr_event_list_pub_s pub
Next event list in the chain.
Definition event.c:378
int _fr_event_user_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_user_t **ev_p, bool trigger, fr_event_user_cb_t callback, void *uctx)
Add a user callback to the event list.
Definition event.c:1882
fr_event_fd_type_t type
Type of events we're interested in.
Definition event.c:269
static fr_table_num_sorted_t const fr_event_fd_type_table[]
Definition event.c:249
static size_t fr_event_fd_type_table_len
Definition event.c:255
uint16_t flags
Flags to use for inserting event.
Definition event.c:125
waitpid(reap->pid_ev->pid, &status, 0)
fr_event_pid_cb_t callback
callback to run when the child exits
Definition event.c:307
static int _event_list_free(fr_event_list_t *el)
Cleanup an event list.
Definition event.c:2421
bool dispatch
Whether the event list is currently dispatching events.
Definition event.c:386
fr_dlist_head_t fd_to_free
File descriptor events pending deletion.
Definition event.c:403
bool coalesce
Coalesce this map with the next.
Definition event.c:128
fr_dlist_t entry
Entry in free list.
Definition event.c:286
int fr_event_corral(fr_event_list_t *el, fr_time_t now, bool wait)
Gather outstanding timer and file descriptor events.
Definition event.c:2058
static int _event_free_indexes(UNUSED void *uctx)
Free any memory we allocated for indexes.
Definition event.c:2435
fr_event_fd_cb_t fr_event_fd_cb(fr_event_fd_t *ef, int kq_filter, int kq_fflags)
Returns the appropriate callback function for a given event.
Definition event.c:1261
void * uctx
Context for the callback.
Definition event.c:371
bool is_registered
Whether this user event has been registered with the event loop.
Definition event.c:344
return processed
Definition event.c:1824
int type
Type this filter applies to.
Definition event.c:127
uint64_t fr_event_list_num_timers(fr_event_list_t *el)
Return the number of timer events currently scheduled.
Definition event.c:560
fr_event_func_map_t const * map
Function map between fr_event_funcs_t and kevent filters.
Definition event.c:278
void * uctx
Context for the callback.
Definition event.c:362
fr_event_post_cb_t callback
The callback to call.
Definition event.c:370
int _fr_event_pid_wait(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_list_t *el, fr_event_pid_t const **ev_p, pid_t pid, fr_event_pid_cb_t callback, void *uctx)
Insert a PID event into an event list.
Definition event.c:1436
char const * name
Name of the event.
Definition event.c:123
int line
Line this event was last updated on.
Definition event.c:294
uintptr_t armour
protection flag from being deleted.
Definition event.c:289
fr_event_user_cb_t callback
The callback to call.
Definition event.c:347
talloc_free(reap)
int fr_event_fd_unarmour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Unarmour an FD.
Definition event.c:1315
int sock_type
The type of socket SOCK_STREAM, SOCK_RAW etc...
Definition event.c:271
fr_dlist_head_t pid_to_reap
A list of all orphaned child processes we're waiting to reap.
Definition event.c:395
uint64_t fr_event_list_num_fds(fr_event_list_t *el)
Return the number of file descriptors is_registered with this event loop.
Definition event.c:548
int fr_event_post_delete(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx)
Delete a post-event callback from the event list.
Definition event.c:2029
void * uctx
Context pointer to pass to each file descriptor callback.
Definition event.c:283
fr_event_func_idx_type_t idx_type
What type of index we use for event to function mapping.
Definition event.c:132
#define GET_FUNC(_ef, _offset)
void * fr_event_fd_uctx(fr_event_fd_t *ef)
Returns the uctx associated with an fr_event_fd_t handle.
Definition event.c:1269
static fr_event_fd_cb_t event_fd_func(fr_event_fd_t *ef, int *filter, int *fflags)
Figure out which function to call given a kevent.
Definition event.c:492
static int _fr_event_reap_free(fr_event_pid_reap_t *reap)
Definition event.c:1633
int kq
instance associated with this event list.
Definition event.c:390
pid_t pid
child to wait for
Definition event.c:304
static void event_pid_eval(fr_event_list_t *el, struct kevent *kev)
Evaluate a EVFILT_PROC event.
Definition event.c:1358
int fr_event_list_kq(fr_event_list_t *el)
Return the kq associated with an event list.
Definition event.c:572
void * uctx
Context for the callback.
Definition event.c:348
bool is_registered
Whether this user event has been registered with the event loop.
Definition event.c:301
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:342
struct fr_event_pid::@135 early_exit
Fields that are only used if we're being triggered by a user event.
int will_exit
Will exit on next call to fr_event_corral.
Definition event.c:381
bool fr_event_list_empty(fr_event_list_t *el)
Return whether the event loop has any active events.
Definition event.c:2586
static int _event_build_indexes(UNUSED void *uctx)
Definition event.c:2443
unsigned int fr_event_list_reap_signal(fr_event_list_t *el, fr_time_delta_t timeout, int signal)
Send a signal to all the processes we have in our reap list, and reap them.
Definition event.c:1699
int16_t filter
Filter to apply.
Definition event.c:124
fr_event_list_t * fr_event_list_alloc(TALLOC_CTX *ctx, fr_event_status_cb_t status, void *status_uctx)
Initialise a new event list.
Definition event.c:2522
static void event_fd_func_index_build(fr_event_func_map_t *map)
Definition event.c:410
static void fr_event_fd_noop(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, UNUSED void *uctx)
Placeholder callback to avoid branches in service loop.
Definition event.c:600
fr_dlist_t entry
If the fr_event_pid is in the detached, reap state, it's inserted into a list associated with the eve...
Definition event.c:329
bool fr_event_loop_exiting(fr_event_list_t *el)
Check to see whether the event loop is in the process of exiting.
Definition event.c:2382
fr_dlist_t entry
Linked list of callback.
Definition event.c:360
int _fr_event_filter_update(NDEBUG_LOCATION_ARGS fr_event_list_t *el, int fd, fr_event_filter_t filter, fr_event_update_t const updates[])
Suspend/resume a subset of filters.
Definition event.c:943
char const * file
Source file this event was last updated in.
Definition event.c:351
int num_fd_events
Number of events in this event list.
Definition event.c:388
int _fr_event_fd_move(NDEBUG_LOCATION_ARGS fr_event_list_t *dst, fr_event_list_t *src, int fd, fr_event_filter_t filter)
Move a file descriptor event from one event list to another.
Definition event.c:892
fr_event_func_map_entry_t ** ev_to_func
Function -> Event maps in index order.
Definition event.c:135
int _fr_event_fd_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_fd_cb_t read_fn, fr_event_fd_cb_t write_fn, fr_event_error_cb_t error, void *uctx)
Associate I/O callbacks with a file descriptor.
Definition event.c:1176
fr_event_fd_type_t
Definition event.c:90
@ FR_EVENT_FD_FILE
is a file.
Definition event.c:92
@ FR_EVENT_FD_DIRECTORY
is a directory.
Definition event.c:93
@ FR_EVENT_FD_SOCKET
is a socket.
Definition event.c:91
fr_event_pid_t const * pid_ev
pid_ev this reaper is bound to.
Definition event.c:327
fr_event_funcs_t active
Active filter functions.
Definition event.c:273
int fr_event_pre_insert(fr_event_list_t *el, fr_event_status_cb_t callback, void *uctx)
Add a pre-event callback to the event list.
Definition event.c:1953
static void _fr_event_pid_early_exit(fr_event_list_t *el, void *uctx)
Called on the next loop through the event loop when inserting an EVFILT_PROC event fails.
Definition event.c:1405
static void event_user_eval(fr_event_list_t *el, struct kevent *kev)
Definition event.c:1854
int exit
If non-zero event loop will prevent the addition of new events, and will return immediately from the ...
Definition event.c:382
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:265
static fr_table_num_sorted_t const kevent_filter_table[]
Definition event.c:67
TALLOC_CTX * linked_ctx
talloc ctx this event was bound to.
Definition event.c:284
static void event_callback(fr_event_list_t *el, fr_event_fd_t *ef, int *filter, int flags, int *fflags)
Definition event.c:2179
void fr_event_loop_exit(fr_event_list_t *el, int code)
Signal an event loop exit with the specified code.
Definition event.c:2371
#define FR_EV_BATCH_FDS
Definition event.c:57
void * uctx
Context pointer to pass to each file descriptor callback.
Definition event.c:308
static int _event_pid_free(fr_event_pid_t *ev)
Remove PID wait event from kevent if the fr_event_pid_t is freed.
Definition event.c:1337
fr_event_list_t * el
Event list this event belongs to.
Definition event.c:326
int fd
File descriptor we're listening for events on.
Definition event.c:267
size_t offset
Offset of function pointer in structure.
Definition event.c:122
int fr_event_fd_delete(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Remove a file descriptor from the event loop.
Definition event.c:1203
fr_dlist_t entry
Linked list of callback.
Definition event.c:369
int fr_event_loop(fr_event_list_t *el)
Run an event loop.
Definition event.c:2393
int fr_event_user_trigger(fr_event_user_t *ev)
Trigger a user event.
Definition event.c:1927
fr_event_fd_t * fr_event_fd_handle(fr_event_list_t *el, int fd, fr_event_filter_t filter)
Get the opaque event handle from a file descriptor.
Definition event.c:1239
fr_rb_node_t node
Entry in the tree of file descriptor handles.
Definition event.c:261
int _fr_event_filter_insert(NDEBUG_LOCATION_ARGS TALLOC_CTX *ctx, fr_event_fd_t **ef_out, fr_event_list_t *el, int fd, fr_event_filter_t filter, void *funcs, fr_event_error_cb_t error, void *uctx)
Insert a filter for the specified fd.
Definition event.c:1020
#define NOTE_EXITSTATUS
int fr_event_post_insert(fr_event_list_t *el, fr_event_post_cb_t callback, void *uctx)
Add a post-event callback to the event list.
Definition event.c:2007
fr_event_pid_t const ** parent
Definition event.c:305
static int _event_user_delete(fr_event_user_t *ev)
Memory will not be freed if we fail to remove the event from the kqueue.
Definition event.c:1836
struct kevent events[FR_EV_BATCH_FDS]
Definition event.c:398
fr_event_func_idx_type_t
Definition event.c:100
@ FR_EVENT_FUNC_IDX_FILTER
Sign flip is performed i.e. -1 = 0The filter is used / as the index in the ev to func index.
Definition event.c:103
@ FR_EVENT_FUNC_IDX_NONE
Definition event.c:101
@ FR_EVENT_FUNC_IDX_FFLAGS
The bit position of the flags in FFLAGS is used to provide the index.
Definition event.c:105
int fr_event_fd_armour(fr_event_list_t *el, int fd, fr_event_filter_t filter, uintptr_t armour)
Armour an FD.
Definition event.c:1285
bool in_handler
Deletes should be deferred until after the handlers complete.
Definition event.c:400
uint32_t fflags
fflags to pass to filter.
Definition event.c:126
A file descriptor/filter event.
Definition event.c:260
Specifies a mapping between a function pointer in a structure and its respective event.
Definition event.c:121
Stores all information relating to an event list.
Definition event.c:377
Hold additional information for automatically reaped PIDs.
Definition event.c:325
Callbacks to perform after all timers and FDs have been checked.
Definition event.c:368
Callbacks to perform when the event handler is about to check the events.
Definition event.c:359
Callbacks for kevent() user events.
Definition event.c:341
int fr_debug_lvl
Definition log.c:40
fr_log_t default_log
Definition log.c:288
void fr_vlog(fr_log_t const *log, fr_log_type_t type, char const *file, int line, char const *fmt, va_list ap)
Send a server log message to its destination.
Definition log.c:341
@ L_DBG_LVL_3
3rd highest priority debug messages (-xxx | -Xx).
Definition log.h:72
@ L_DBG
Only displayed when debugging is enabled.
Definition log.h:59
static uint8_t fr_high_bit_pos(uint64_t num)
Find the highest order high bit in an unsigned 64 bit integer.
Definition math.h:94
unsigned short uint16_t
unsigned int uint32_t
long int ssize_t
unsigned char uint8_t
unsigned long int size_t
#define fr_assert(_expr)
Definition rad_assert.h:38
void fr_rand_init(void)
Definition rand.c:34
uint32_t randrsl[256]
Definition rand.h:40
uint32_t randcnt
Definition rand.h:39
uint32_t fr_rb_num_elements(fr_rb_tree_t *tree)
Return how many nodes there are in a tree.
Definition rb.c:781
void * fr_rb_find(fr_rb_tree_t const *tree, void const *data)
Find an element in the tree, returning the data, not the node.
Definition rb.c:577
bool fr_rb_insert(fr_rb_tree_t *tree, void const *data)
Insert data into a tree.
Definition rb.c:626
bool fr_rb_delete(fr_rb_tree_t *tree, void const *data)
Remove node and free data (if a free function was specified)
Definition rb.c:741
#define fr_rb_inline_talloc_alloc(_ctx, _type, _field, _data_cmp, _data_free)
Allocs a red black that verifies elements are of a specific talloc type.
Definition rb.h:246
The main red black tree structure.
Definition rb.h:73
return count
Definition module.c:155
char const * fr_syserror(int num)
Guaranteed to be thread-safe version of strerror.
Definition syserror.c:243
#define fr_table_str_by_value(_table, _number, _def)
Convert an integer to a string.
Definition table.h:772
An element in a lexicographically sorted array of name to num mappings.
Definition table.h:49
int talloc_link_ctx(TALLOC_CTX *parent, TALLOC_CTX *child)
Link two different parent and child contexts, so the child is freed before the parent.
Definition talloc.c:167
#define fr_time_delta_to_timespec(_delta)
Convert a delta to a timespec.
Definition time.h:666
static fr_time_delta_t fr_time_delta_from_sec(int64_t sec)
Definition time.h:590
#define fr_time_delta_wrap(_time)
Definition time.h:152
#define fr_time_wrap(_time)
Definition time.h:145
#define fr_time_lteq(_a, _b)
Definition time.h:240
#define fr_time_delta_ispos(_a)
Definition time.h:290
#define fr_time_eq(_a, _b)
Definition time.h:241
static int64_t fr_time_to_usec(fr_time_t when)
Convert an fr_time_t (internal time) to number of usec since the unix epoch (wallclock time)
Definition time.h:701
#define fr_time_add(_a, _b)
Add a time/time delta together.
Definition time.h:196
#define fr_time_gt(_a, _b)
Definition time.h:237
#define USEC
Definition time.h:380
#define fr_time_sub(_a, _b)
Subtract one time from another.
Definition time.h:229
#define fr_time_neq(_a, _b)
Definition time.h:242
A time delta, a difference in time measured in nanoseconds.
Definition time.h:80
"server local" time.
Definition time.h:69
int fr_timer_list_run(fr_timer_list_t *tl, fr_time_t *when)
Execute any pending events in the event loop.
Definition timer.c:939
uint64_t fr_timer_list_num_events(fr_timer_list_t *tl)
Return number of pending events.
Definition timer.c:1150
fr_time_t fr_timer_list_when(fr_timer_list_t *tl)
Return the time of the next event.
Definition timer.c:1187
fr_timer_list_t * fr_timer_list_lst_alloc(TALLOC_CTX *ctx, fr_timer_list_t *parent)
Allocate a new lst based timer list.
Definition timer.c:1262
An event timer list.
Definition timer.c:50
A timer event.
Definition timer.c:84
#define fr_timer_at(...)
Definition timer.h:81
int trigger(unlang_interpret_t *intp, CONF_SECTION const *cs, CONF_PAIR **trigger_cp, char const *name, bool rate_limit, fr_pair_list_t *args)
Execute a trigger - call an executable to process an event.
Definition trigger.c:156
close(uq->fd)
static fr_event_list_t * el
void fr_strerror_clear(void)
Clears all pending messages from the talloc pools.
Definition strerror.c:576
#define fr_strerror_printf(_fmt,...)
Log to thread local error buffer.
Definition strerror.h:64
#define fr_strerror_printf_push(_fmt,...)
Add a message to an existing stack of messages at the tail.
Definition strerror.h:84
#define fr_strerror_const(_msg)
Definition strerror.h:223
int nonnull(2, 5))
static size_t char ** out
Definition value.h:1023