The FreeRADIUS server $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
trunk.c
Go to the documentation of this file.
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or (at
5 * your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15 */
16
17/**
18 * $Id: b29bba6f50a82b08ecdb52ac03b2a82e4d40f09c $
19 *
20 * @file src/lib/server/trunk.c
21 * @brief A management API for bonding multiple connections together.
22 *
23 * @copyright 2019-2020 Arran Cudbard-Bell (a.cudbardb@freeradius.org)
24 * @copyright 2019-2020 The FreeRADIUS server project
25 */
26
27#define LOG_PREFIX trunk->log_prefix
28
29#ifdef NDEBUG
30# define TALLOC_GET_TYPE_ABORT_NOOP 1
31#endif
32
35typedef struct trunk_s trunk_t;
36#define _TRUNK_PRIVATE 1
37#include <freeradius-devel/server/trunk.h>
38
39#include <freeradius-devel/server/trigger.h>
40#include <freeradius-devel/util/debug.h>
41#include <freeradius-devel/util/misc.h>
42#include <freeradius-devel/util/syserror.h>
43#include <freeradius-devel/util/minmax_heap.h>
44
45#ifdef HAVE_STDATOMIC_H
46# include <stdatomic.h>
47# ifndef ATOMIC_VAR_INIT
48# define ATOMIC_VAR_INIT(_x) (_x)
49# endif
50#else
51# include <freeradius-devel/util/stdatomic.h>
52#endif
53
54static atomic_uint_fast64_t request_counter = ATOMIC_VAR_INIT(1);
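/*
 *	A minimal sketch of how a unique request ID would typically be drawn
 *	from this counter (the exact call site is elided from this listing):
 *
 *		uint64_t id = atomic_fetch_add_explicit(&request_counter, 1, memory_order_relaxed);
 */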
55
56#ifdef TESTING_TRUNK
58
59static fr_time_t test_time(void)
60{
61 return test_time_base;
62}
63
64#define fr_time test_time
65#endif
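/*
 *	A minimal sketch of how a test harness built with TESTING_TRUNK would
 *	advance the mocked clock, assuming the usual fr_time_delta helpers from
 *	util/time.h:
 *
 *		test_time_base = fr_time_add(test_time_base, fr_time_delta_from_sec(1));
 */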
66
67#ifndef NDEBUG
68/** The maximum number of state logs to record per request
69 *
70 */
71#define TRUNK_REQUEST_STATE_LOG_MAX 20
72
73/** Trace state machine changes for a particular request
74 *
75 */
76typedef struct {
77 fr_dlist_head_t *log_head; //!< To allow the log entry to remove itself on free.
78 fr_dlist_t entry; //!< Entry in the linked list.
79 trunk_request_state_t from; //!< What state we transitioned from.
80 trunk_request_state_t to; //!< What state we transitioned to.
81
82 trunk_connection_t *tconn; //!< The connection the request was associated with.
83 ///< Pointer may now be invalid, do not de-reference.
84
85 uint64_t tconn_id; //!< If the treq was associated with a connection
86 ///< the connection ID.
87 trunk_connection_state_t tconn_state; //!< If the treq was associated with a connection
88 ///< the connection state at the time of the
89 ///< state transition.
90
91 char const *function; //!< Function the state change occurred in.
92 int line; //!< Line the state change occurred on.
94#endif
95
96/** Wraps a normal request
97 *
98 */
100 struct trunk_request_pub_s pub; //!< Public fields in the trunk request.
101 ///< This *MUST* be the first field in this
102 ///< structure.
103
104 uint64_t id; //!< Trunk request ID.
105
106 fr_heap_index_t heap_id; //!< Used to track the request in the conn->pending heap.
107
108 fr_dlist_t entry; //!< Used to track the trunk request in the conn->sent
109 ///< or trunk->backlog request list.
110
111 trunk_cancel_reason_t cancel_reason; //!< Why this request was cancelled.
112
113 fr_time_t last_freed; //!< Last time this request was freed.
114
115 bool bound_to_conn; //!< Request is bound to a single connection; fail it if
116 ///< there's an attempt to re-enqueue it elsewhere.
117
118 bool sent; //!< Trunk request has been sent at least once.
119 ///< Used so that re-queueing doesn't increase the
120 ///< connection's `sent_count`.
121
122#ifndef NDEBUG
123 fr_dlist_head_t log; //!< State change log.
124#endif
125};
126
127
128/** Associates request queues with a connection
129 *
130 * @dotfile src/lib/server/trunk_conn.gv "Trunk connection state machine"
131 * @dotfile src/lib/server/trunk_req.gv "Trunk request state machine"
132 */
134 struct trunk_connection_pub_s pub; //!< Public fields in the trunk connection.
135 ///< This *MUST* be the first field in this
136 ///< structure.
137
138 fr_heap_index_t heap_id; //!< Used to track the connection in the connected
139 ///< heap.
140
141 fr_dlist_t entry; //!< Used to track the connection in the connecting,
142 ///< full and failed lists.
143
144 /** @name State
145 * @{
146 */
147 trunk_connection_event_t events; //!< The current events we expect to be notified on.
148 /** @} */
149
150 /** @name Request lists
151 * @{
152 */
153 fr_heap_t *pending; //!< Requests waiting to be sent.
154
155 trunk_request_t *partial; //!< Partially written request.
156
157 fr_dlist_head_t sent; //!< Sent requests.
158
159 fr_dlist_head_t reapable; //!< Sent requests for which no response is expected.
160
161 fr_dlist_head_t cancel; //!< Requests in the cancel state.
162
163 trunk_request_t *cancel_partial; //!< Partially written cancellation request.
164
165 fr_dlist_head_t cancel_sent; //!< Sent cancellation requests.
166 /** @} */
167
168 /** @name Statistics
169 * @{
170 */
171 uint64_t sent_count; //!< The number of requests that have been sent using
172 ///< this connection.
173 /** @} */
174
175 /** @name Timers
176 * @{
177 */
178 fr_timer_t *lifetime_ev; //!< Maximum time this connection can be open.
179 /** @} */
180};
181
182/** An entry in a trunk watch function list
183 *
184 */
185typedef struct trunk_watch_entry_s {
186 fr_dlist_t entry; //!< List entry.
187 trunk_watch_t func; //!< Function to call when a trunk enters
188 ///< the state this list belongs to
189 bool oneshot; //!< Remove the function after it's called once.
190 bool enabled; //!< Whether the watch entry is enabled.
191 void *uctx; //!< User data to pass to the function.
192} trunk_watch_entry_t;
 193
194/** Main trunk management handle
195 *
196 */
197struct trunk_s {
198 struct trunk_pub_s pub; //!< Public fields in the trunk.
199 ///< This *MUST* be the first field in this
200 ///< structure.
201
202 char const *log_prefix; //!< What to prepend to messages.
203
204 fr_event_list_t *el; //!< Event list used by this trunk and the connection.
205
206 trunk_conf_t conf; //!< Trunk common configuration.
207
208 fr_dlist_head_t free_requests; //!< Requests in the unassigned state. Waiting to be
209 ///< enqueued.
210
211 fr_heap_t *backlog; //!< The request backlog. Requests we couldn't
212 ///< immediately assign to a connection.
213
214 /** @name Connection lists
215 *
216 * A connection must always be in exactly one of these lists
217 * or trees.
218 *
219 * @{
220 */
221 fr_dlist_head_t init; //!< Connections which have not yet started
222 ///< connecting.
223
224 fr_dlist_head_t connecting; //!< Connections which are not yet in the open state.
225
226 fr_minmax_heap_t *active; //!< Connections which can service requests.
227
228 fr_dlist_head_t full; //!< Connections which have too many outstanding
229 ///< requests.
230
231 fr_dlist_head_t inactive; //!< Connections which have been signalled to be
232 ///< inactive by the API client.
233
234 fr_dlist_head_t inactive_draining; //!< Connections which have been signalled to be
235 ///< inactive by the API client, which the trunk
236 ///< manager is draining to close.
237
238 fr_dlist_head_t failed; //!< Connections that'll be reconnected shortly.
239
240 fr_dlist_head_t closed; //!< Connections that have closed. Either due to
241 ///< shutdown, reconnection or failure.
242
243 fr_dlist_head_t draining; //!< Connections that will be freed once all their
244 ///< requests are complete, but can be reactivated.
245
246 fr_dlist_head_t draining_to_free; //!< Connections that will be freed once all their
247 ///< requests are complete.
248
249 fr_dlist_head_t to_free; //!< Connections we're done with and will free on
250 //!< the next call to trunk_manage.
251 //!< This prevents connections from being freed
252 //!< whilst we're inside callbacks.
253 /** @} */
254
255 /** @name Callbacks
256 * @{
257 */
258 trunk_io_funcs_t funcs; //!< I/O functions.
259
260 void *in_handler; //!< Which handler we're inside.
261
262 void *uctx; //!< Uctx data to pass to alloc.
263
264 fr_dlist_head_t watch[TRUNK_STATE_MAX]; //!< To be called when trunk changes state.
265
266 trunk_watch_entry_t *next_watcher; //!< Watcher about to be run. Used to prevent nested watchers.
267 /** @} */
268
269 /** @name Timers
270 * @{
271 */
272 fr_timer_t *manage_ev; //!< Periodic connection management event.
273 /** @} */
274
275 /** @name Log rate limiting entries
276 * @{
277 */
278 fr_rate_limit_t limit_max_requests_alloc_log; //!< Rate limit on "Refusing to alloc requests - Limit of * requests reached"
279
280 fr_rate_limit_t limit_last_failure_log; //!< Rate limit on "Refusing to enqueue requests - No active conns"
281 /** @} */
282
283 /** @name State
284 * @{
285 */
286 bool freeing; //!< Trunk is being freed, don't spawn new
287 ///< connections or re-enqueue.
288
289 bool started; //!< Has the trunk been started.
290
291 bool managing_connections; //!< Whether the trunk is allowed to manage
292 ///< (open/close) connections.
293
294 uint64_t last_req_per_conn; //!< The last request to connection ratio we calculated.
295 /** @} */
296};
297
299 { FR_CONF_OFFSET("per_connection_max", trunk_conf_t, max_req_per_conn), .dflt = "2000" },
300 { FR_CONF_OFFSET("per_connection_target", trunk_conf_t, target_req_per_conn), .dflt = "1000" },
301 { FR_CONF_OFFSET("free_delay", trunk_conf_t, req_cleanup_delay), .dflt = "10.0" },
302
304};
305
307 { FR_CONF_OFFSET("connect_timeout", connection_conf_t, connection_timeout), .dflt = "3.0" },
308 { FR_CONF_OFFSET("reconnect_delay", connection_conf_t, reconnection_delay), .dflt = "1" },
309
311};
312
313#ifndef TRUNK_TESTS
315 { FR_CONF_OFFSET("start", trunk_conf_t, start), .dflt = "1" },
316 { FR_CONF_OFFSET("min", trunk_conf_t, min), .dflt = "1" },
317 { FR_CONF_OFFSET("max", trunk_conf_t, max), .dflt = "5" },
318 { FR_CONF_OFFSET("connecting", trunk_conf_t, connecting), .dflt = "2" },
319 { FR_CONF_OFFSET("uses", trunk_conf_t, max_uses), .dflt = "0" },
320 { FR_CONF_OFFSET("lifetime", trunk_conf_t, lifetime), .dflt = "0" },
321 { FR_CONF_OFFSET("idle_timeout", trunk_conf_t, idle_timeout), .dflt = "0" },
322
323 { FR_CONF_OFFSET("open_delay", trunk_conf_t, open_delay), .dflt = "0.2" },
324 { FR_CONF_OFFSET("close_delay", trunk_conf_t, close_delay), .dflt = "10.0" },
325
326 { FR_CONF_OFFSET("manage_interval", trunk_conf_t, manage_interval), .dflt = "0.2" },
327
328 { FR_CONF_OFFSET("max_backlog", trunk_conf_t, max_backlog), .dflt = "1000" },
329
330 { FR_CONF_OFFSET_SUBSECTION("connection", 0, trunk_conf_t, conn_conf, trunk_config_connection), .subcs_size = sizeof(trunk_config_connection) },
331 { FR_CONF_POINTER("request", 0, CONF_FLAG_SUBSECTION, NULL), .subcs = (void const *) trunk_config_request },
332
334};
335#endif
336
337#ifndef NDEBUG
338/** Map request states to trigger names
339 *
340 * Must stay in the same order as #trunk_request_state_t
341 */
343 { L("pool.request_init"), TRUNK_REQUEST_STATE_INIT }, /* 0x0000 - bit 0 */
344 { L("pool.request_unassigned"), TRUNK_REQUEST_STATE_UNASSIGNED }, /* 0x0001 - bit 1 */
345 { L("pool.request_backlog"), TRUNK_REQUEST_STATE_BACKLOG }, /* 0x0002 - bit 2 */
346 { L("pool.request_pending"), TRUNK_REQUEST_STATE_PENDING }, /* 0x0004 - bit 3 */
347 { L("pool.request_partial"), TRUNK_REQUEST_STATE_PARTIAL }, /* 0x0008 - bit 4 */
348 { L("pool.request_sent"), TRUNK_REQUEST_STATE_SENT }, /* 0x0010 - bit 5 */
349 { L("pool.request_state_reapable"), TRUNK_REQUEST_STATE_REAPABLE }, /* 0x0020 - bit 6 */
350 { L("pool.request_complete"), TRUNK_REQUEST_STATE_COMPLETE }, /* 0x0040 - bit 7 */
351 { L("pool.request_state_failed"), TRUNK_REQUEST_STATE_FAILED }, /* 0x0080 - bit 8 */
352 { L("pool.request_state_cancel"), TRUNK_REQUEST_STATE_CANCEL }, /* 0x0100 - bit 9 */
353 { L("pool.request_state_cancel_sent"), TRUNK_REQUEST_STATE_CANCEL_SENT }, /* 0x0200 - bit 10 */
354 { L("pool.request_state_cancel_partial"), TRUNK_REQUEST_STATE_CANCEL_PARTIAL }, /* 0x0400 - bit 11 */
355 { L("pool.request_state_cancel_complete"), TRUNK_REQUEST_STATE_CANCEL_COMPLETE }, /* 0x0800 - bit 12 */
356};
358#endif
359
361 { L("INIT"), TRUNK_REQUEST_STATE_INIT },
362 { L("UNASSIGNED"), TRUNK_REQUEST_STATE_UNASSIGNED },
363 { L("BACKLOG"), TRUNK_REQUEST_STATE_BACKLOG },
364 { L("PENDING"), TRUNK_REQUEST_STATE_PENDING },
365 { L("PARTIAL"), TRUNK_REQUEST_STATE_PARTIAL },
366 { L("SENT"), TRUNK_REQUEST_STATE_SENT },
367 { L("REAPABLE"), TRUNK_REQUEST_STATE_REAPABLE },
368 { L("COMPLETE"), TRUNK_REQUEST_STATE_COMPLETE },
369 { L("FAILED"), TRUNK_REQUEST_STATE_FAILED },
370 { L("CANCEL"), TRUNK_REQUEST_STATE_CANCEL },
371 { L("CANCEL-SENT"), TRUNK_REQUEST_STATE_CANCEL_SENT },
372 { L("CANCEL-PARTIAL"), TRUNK_REQUEST_STATE_CANCEL_PARTIAL },
373 { L("CANCEL-COMPLETE"), TRUNK_REQUEST_STATE_CANCEL_COMPLETE }
374};
376
377/** Map connection states to trigger names
378 *
379 * Must stay in the same order as #trunk_connection_state_t
380 */
382 { L("pool.connection_halted"), TRUNK_CONN_HALTED }, /* 0x0000 - bit 0 */
383 { L("pool.connection_init"), TRUNK_CONN_INIT }, /* 0x0001 - bit 1 */
384 { L("pool.connection_connecting"), TRUNK_CONN_CONNECTING }, /* 0x0002 - bit 2 */
385 { L("pool.connection_active"), TRUNK_CONN_ACTIVE }, /* 0x0004 - bit 3 */
386 { L("pool.connection_closed"), TRUNK_CONN_CLOSED }, /* 0x0008 - bit 4 */
387 { L("pool.connection_full"), TRUNK_CONN_FULL }, /* 0x0010 - bit 5 */
388 { L("pool.connection_inactive"), TRUNK_CONN_INACTIVE }, /* 0x0020 - bit 6 */
389 { L("pool.connection_inactive_draining"), TRUNK_CONN_INACTIVE_DRAINING }, /* 0x0040 - bit 7 */
390 { L("pool.connection_draining"), TRUNK_CONN_DRAINING }, /* 0x0080 - bit 8 */
391 { L("pool.connection_draining_to_free"), TRUNK_CONN_DRAINING_TO_FREE } /* 0x0100 - bit 9 */
392};
394
396 { L("IDLE"), TRUNK_STATE_IDLE },
397 { L("ACTIVE"), TRUNK_STATE_ACTIVE },
398 { L("PENDING"), TRUNK_STATE_PENDING }
399};
401
403 { L("INIT"), TRUNK_CONN_INIT },
404 { L("HALTED"), TRUNK_CONN_HALTED },
405 { L("CONNECTING"), TRUNK_CONN_CONNECTING },
406 { L("ACTIVE"), TRUNK_CONN_ACTIVE },
407 { L("CLOSED"), TRUNK_CONN_CLOSED },
408 { L("FULL"), TRUNK_CONN_FULL },
409 { L("INACTIVE"), TRUNK_CONN_INACTIVE },
410 { L("INACTIVE-DRAINING"), TRUNK_CONN_INACTIVE_DRAINING },
411 { L("DRAINING"), TRUNK_CONN_DRAINING },
412 { L("DRAINING-TO-FREE"), TRUNK_CONN_DRAINING_TO_FREE }
413};
415
417 { L("TRUNK_CANCEL_REASON_NONE"), TRUNK_CANCEL_REASON_NONE },
418 { L("TRUNK_CANCEL_REASON_SIGNAL"), TRUNK_CANCEL_REASON_SIGNAL },
419 { L("TRUNK_CANCEL_REASON_MOVE"), TRUNK_CANCEL_REASON_MOVE },
420 { L("TRUNK_CANCEL_REASON_REQUEUE"), TRUNK_CANCEL_REASON_REQUEUE }
421};
423
425 { L("TRUNK_CONN_EVENT_NONE"), TRUNK_CONN_EVENT_NONE },
426 { L("TRUNK_CONN_EVENT_READ"), TRUNK_CONN_EVENT_READ },
427 { L("TRUNK_CONN_EVENT_WRITE"), TRUNK_CONN_EVENT_WRITE },
428 { L("TRUNK_CONN_EVENT_BOTH"), TRUNK_CONN_EVENT_BOTH },
429};
431
432#define CONN_TRIGGER(_state) do { \
433 if (trunk->pub.triggers) { \
434 trigger_exec(unlang_interpret_get_thread_default(), \
435 NULL, fr_table_str_by_value(trunk_conn_trigger_names, _state, \
436 "<INVALID>"), true, NULL); \
437 } \
438} while (0)
439
440#define CONN_STATE_TRANSITION(_new, _log) \
441do { \
442 _log("[%" PRIu64 "] Trunk connection changed state %s -> %s", \
443 tconn->pub.conn->id, \
444 fr_table_str_by_value(trunk_connection_states, tconn->pub.state, "<INVALID>"), \
445 fr_table_str_by_value(trunk_connection_states, _new, "<INVALID>")); \
446 tconn->pub.state = _new; \
447 CONN_TRIGGER(_new); \
448 trunk_requests_per_connection(NULL, NULL, trunk, fr_time(), false); \
449} while (0)
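/*
 *	Illustrative use of the transition macro above.  It expects local tconn
 *	and trunk pointers in scope; the state and log level shown here are
 *	arbitrary:
 *
 *		CONN_STATE_TRANSITION(TRUNK_CONN_ACTIVE, DEBUG2);
 */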
450
451#define CONN_BAD_STATE_TRANSITION(_new) \
452do { \
453 if (!fr_cond_assert_msg(0, "[%" PRIu64 "] Trunk connection invalid transition %s -> %s", \
454 tconn->pub.conn->id, \
455 fr_table_str_by_value(trunk_connection_states, tconn->pub.state, "<INVALID>"), \
456 fr_table_str_by_value(trunk_connection_states, _new, "<INVALID>"))) return; \
457} while (0)
458
459#ifndef NDEBUG
460void trunk_request_state_log_entry_add(char const *function, int line,
461 trunk_request_t *treq, trunk_request_state_t new) CC_HINT(nonnull);
462
463#define REQUEST_TRIGGER(_state) do { \
464 if (trunk->pub.triggers) { \
465 trigger_exec(unlang_interpret_get_thread_default(), \
466 NULL, fr_table_str_by_value(trunk_req_trigger_names, _state, \
467 "<INVALID>"), true, NULL); \
468 } \
469} while (0)
470
471/** Record a request state transition and log appropriate output
472 *
473 */
474#define REQUEST_STATE_TRANSITION(_new) \
475do { \
476 request_t *request = treq->pub.request; \
477 ROPTIONAL(RDEBUG3, DEBUG3, "Trunk request %" PRIu64 " changed state %s -> %s", \
478 treq->id, \
479 fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
480 fr_table_str_by_value(trunk_request_states, _new, "<INVALID>")); \
481 trunk_request_state_log_entry_add(__FUNCTION__, __LINE__, treq, _new); \
482 treq->pub.state = _new; \
483 REQUEST_TRIGGER(_new); \
484} while (0)
485#define REQUEST_BAD_STATE_TRANSITION(_new) \
486do { \
487 trunk_request_state_log(&default_log, L_ERR, __FILE__, __LINE__, treq); \
488 if (!fr_cond_assert_msg(0, "Trunk request %" PRIu64 " invalid transition %s -> %s", \
489 treq->id, \
490 fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
491 fr_table_str_by_value(trunk_request_states, _new, "<INVALID>"))) return; \
492} while (0)
493#else
494/** Record a request state transition
495 *
496 */
497#define REQUEST_STATE_TRANSITION(_new) \
498do { \
499 request_t *request = treq->pub.request; \
500 ROPTIONAL(RDEBUG3, DEBUG3, "Trunk request %" PRIu64 " changed state %s -> %s", \
501 treq->id, \
502 fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
503 fr_table_str_by_value(trunk_request_states, _new, "<INVALID>")); \
504 treq->pub.state = _new; \
505} while (0)
506#define REQUEST_BAD_STATE_TRANSITION(_new) \
507do { \
508 if (!fr_cond_assert_msg(0, "Trunk request %" PRIu64 " invalid transition %s -> %s", \
509 treq->id, \
510 fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"), \
511 fr_table_str_by_value(trunk_request_states, _new, "<INVALID>"))) return; \
512} while (0)
513#endif
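/*
 *	Illustrative use of the request transition macros above.  Both variants
 *	expect a local treq pointer (and, when triggers are enabled, trunk) in
 *	scope; the target state shown here is arbitrary:
 *
 *		REQUEST_STATE_TRANSITION(TRUNK_REQUEST_STATE_PENDING);
 */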
514
515
516/** Call the cancel callback if set
517 *
518 */
519#define DO_REQUEST_CANCEL(_treq, _reason) \
520do { \
521 if ((_treq)->pub.trunk->funcs.request_cancel) { \
522 request_t *request = (_treq)->pub.request; \
523 void *_prev = (_treq)->pub.trunk->in_handler; \
524 (_treq)->pub.trunk->in_handler = (void *)(_treq)->pub.trunk->funcs.request_cancel; \
525 ROPTIONAL(RDEBUG3, DEBUG3, "Calling request_cancel(conn=%p, preq=%p, reason=%s, uctx=%p)", \
526 (_treq)->pub.tconn->pub.conn, \
527 (_treq)->pub.preq, \
528 fr_table_str_by_value(trunk_cancellation_reasons, \
529 (_reason), \
530 "<INVALID>"), \
531 (_treq)->pub.trunk->uctx); \
532 (_treq)->pub.trunk->funcs.request_cancel((_treq)->pub.tconn->pub.conn, (_treq)->pub.preq, (_reason), (_treq)->pub.trunk->uctx); \
533 (_treq)->pub.trunk->in_handler = _prev; \
534 } \
535} while(0)
536
537/** Call the "conn_release" callback (if set)
538 *
539 */
540#define DO_REQUEST_CONN_RELEASE(_treq) \
541do { \
542 if ((_treq)->pub.trunk->funcs.request_conn_release) { \
543 request_t *request = (_treq)->pub.request; \
544 void *_prev = (_treq)->pub.trunk->in_handler; \
545 (_treq)->pub.trunk->in_handler = (void *)(_treq)->pub.trunk->funcs.request_conn_release; \
546 ROPTIONAL(RDEBUG3, DEBUG3, "Calling request_conn_release(conn=%p, preq=%p, uctx=%p)", \
547 (_treq)->pub.tconn->pub.conn, \
548 (_treq)->pub.preq, \
549 (_treq)->pub.trunk->uctx); \
550 (_treq)->pub.trunk->funcs.request_conn_release((_treq)->pub.tconn->pub.conn, (_treq)->pub.preq, (_treq)->pub.trunk->uctx); \
551 (_treq)->pub.trunk->in_handler = _prev; \
552 } \
553} while(0)
554
555/** Call the complete callback (if set)
556 *
557 */
558#define DO_REQUEST_COMPLETE(_treq) \
559do { \
560 if ((_treq)->pub.trunk->funcs.request_complete) { \
561 request_t *request = (_treq)->pub.request; \
562 void *_prev = (_treq)->pub.trunk->in_handler; \
563 ROPTIONAL(RDEBUG3, DEBUG3, "Calling request_complete(request=%p, preq=%p, rctx=%p, uctx=%p)", \
564 (_treq)->pub.request, \
565 (_treq)->pub.preq, \
566 (_treq)->pub.rctx, \
567 (_treq)->pub.trunk->uctx); \
568 (_treq)->pub.trunk->in_handler = (void *)(_treq)->pub.trunk->funcs.request_complete; \
569 (_treq)->pub.trunk->funcs.request_complete((_treq)->pub.request, (_treq)->pub.preq, (_treq)->pub.rctx, (_treq)->pub.trunk->uctx); \
570 (_treq)->pub.trunk->in_handler = _prev; \
571 } \
572} while(0)
573
574/** Call the fail callback (if set)
575 *
576 */
577#define DO_REQUEST_FAIL(_treq, _prev_state) \
578do { \
579 if ((_treq)->pub.trunk->funcs.request_fail) { \
580 request_t *request = (_treq)->pub.request; \
581 void *_prev = (_treq)->pub.trunk->in_handler; \
582 ROPTIONAL(RDEBUG3, DEBUG3, "Calling request_fail(request=%p, preq=%p, rctx=%p, state=%s uctx=%p)", \
583 (_treq)->pub.request, \
584 (_treq)->pub.preq, \
585 (_treq)->pub.rctx, \
586 fr_table_str_by_value(trunk_request_states, (_prev_state), "<INVALID>"), \
587 (_treq)->pub.trunk->uctx); \
588 (_treq)->pub.trunk->in_handler = (void *)(_treq)->pub.trunk->funcs.request_fail; \
589 (_treq)->pub.trunk->funcs.request_fail((_treq)->pub.request, (_treq)->pub.preq, (_treq)->pub.rctx, _prev_state, (_treq)->pub.trunk->uctx); \
590 (_treq)->pub.trunk->in_handler = _prev; \
591 } \
592} while(0)
593
594/** Call the free callback (if set)
595 *
596 */
597#define DO_REQUEST_FREE(_treq) \
598do { \
599 if ((_treq)->pub.trunk->funcs.request_free) { \
600 request_t *request = (_treq)->pub.request; \
601 void *_prev = (_treq)->pub.trunk->in_handler; \
602 ROPTIONAL(RDEBUG3, DEBUG3, "Calling request_free(request=%p, preq=%p, uctx=%p)", \
603 (_treq)->pub.request, \
604 (_treq)->pub.preq, \
605 (_treq)->pub.trunk->uctx); \
606 (_treq)->pub.trunk->in_handler = (void *)(_treq)->pub.trunk->funcs.request_free; \
607 (_treq)->pub.trunk->funcs.request_free((_treq)->pub.request, (_treq)->pub.preq, (_treq)->pub.trunk->uctx); \
608 (_treq)->pub.trunk->in_handler = _prev; \
609 } \
610} while(0)
611
612/** Write one or more requests to a connection
613 *
614 */
615#define DO_REQUEST_MUX(_tconn) \
616do { \
617 void *_prev = (_tconn)->pub.trunk->in_handler; \
618 DEBUG3("[%" PRIu64 "] Calling request_mux(el=%p, tconn=%p, conn=%p, uctx=%p)", \
619 (_tconn)->pub.conn->id, \
620 (_tconn)->pub.trunk->el, \
621 (_tconn), \
622 (_tconn)->pub.conn, \
623 (_tconn)->pub.trunk->uctx); \
624 (_tconn)->pub.trunk->in_handler = (void *)(_tconn)->pub.trunk->funcs.request_mux; \
625 (_tconn)->pub.trunk->funcs.request_mux((_tconn)->pub.trunk->el, (_tconn), (_tconn)->pub.conn, (_tconn)->pub.trunk->uctx); \
626 (_tconn)->pub.trunk->in_handler = _prev; \
627} while(0)
628
629/** Read one or more requests from a connection
630 *
631 */
632#define DO_REQUEST_DEMUX(_tconn) \
633do { \
634 void *_prev = (_tconn)->pub.trunk->in_handler; \
635 DEBUG3("[%" PRIu64 "] Calling request_demux(tconn=%p, conn=%p, uctx=%p)", \
636 (_tconn)->pub.conn->id, \
637 (_tconn), \
638 (_tconn)->pub.conn, \
639 (_tconn)->pub.trunk->uctx); \
640 (_tconn)->pub.trunk->in_handler = (void *)(_tconn)->pub.trunk->funcs.request_demux; \
641 (_tconn)->pub.trunk->funcs.request_demux((_tconn)->pub.trunk->el, (_tconn), (_tconn)->pub.conn, (_tconn)->pub.trunk->uctx); \
642 (_tconn)->pub.trunk->in_handler = _prev; \
643} while(0)
644
645/** Write one or more cancellation requests to a connection
646 *
647 */
648#define DO_REQUEST_CANCEL_MUX(_tconn) \
649do { \
650 if ((_tconn)->pub.trunk->funcs.request_cancel_mux) { \
651 void *_prev = (_tconn)->pub.trunk->in_handler; \
652 DEBUG3("[%" PRIu64 "] Calling request_cancel_mux(tconn=%p, conn=%p, uctx=%p)", \
653 (_tconn)->pub.conn->id, \
654 (_tconn), \
655 (_tconn)->pub.conn, \
656 (_tconn)->pub.trunk->uctx); \
657 (_tconn)->pub.trunk->in_handler = (void *)(_tconn)->pub.trunk->funcs.request_cancel_mux; \
658 (_tconn)->pub.trunk->funcs.request_cancel_mux((_tconn)->pub.trunk->el, (_tconn), (_tconn)->pub.conn, (_tconn)->pub.trunk->uctx); \
659 (_tconn)->pub.trunk->in_handler = _prev; \
660 } \
661} while(0)
662
663/** Allocate a new connection
664 *
665 */
666#define DO_CONNECTION_ALLOC(_tconn) \
667do { \
668 void *_prev = trunk->in_handler; \
669 DEBUG3("Calling connection_alloc(tconn=%p, el=%p, conf=%p, log_prefix=\"%s\", uctx=%p)", \
670 (_tconn), \
671 (_tconn)->pub.trunk->el, \
672 (_tconn)->pub.trunk->conf.conn_conf, \
673 trunk->log_prefix, \
674 (_tconn)->pub.trunk->uctx); \
675 (_tconn)->pub.trunk->in_handler = (void *) (_tconn)->pub.trunk->funcs.connection_alloc; \
676 (_tconn)->pub.conn = trunk->funcs.connection_alloc((_tconn), (_tconn)->pub.trunk->el, (_tconn)->pub.trunk->conf.conn_conf, (_tconn)->pub.trunk->log_prefix, trunk->uctx); \
677 (_tconn)->pub.trunk->in_handler = _prev; \
678 if (!(_tconn)->pub.conn) { \
679 ERROR("Failed creating new connection"); \
680 talloc_free(tconn); \
681 return -1; \
682 } \
683} while(0)
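/*
 *	A rough sketch of the connection_alloc callback shape this macro expects,
 *	inferred from the call above.  Parameter types, constness and the
 *	my_connection_alloc name are assumptions, not part of this file:
 *
 *		static connection_t *my_connection_alloc(trunk_connection_t *tconn, fr_event_list_t *el,
 *							 connection_conf_t const *conf,
 *							 char const *log_prefix, void *uctx)
 *		{
 *			connection_t *conn = NULL;
 *
 *			// ... build the underlying connection here ...
 *
 *			return conn;	// NULL makes the macro log an error and free tconn
 *		}
 */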
684
685/** Change what events the connection should be notified about
686 *
687 */
688#define DO_CONNECTION_NOTIFY(_tconn, _events) \
689do { \
690 if ((_tconn)->pub.trunk->funcs.connection_notify) { \
691 void *_prev = (_tconn)->pub.trunk->in_handler; \
692 DEBUG3("[%" PRIu64 "] Calling connection_notify(tconn=%p, conn=%p, el=%p, events=%s, uctx=%p)", \
693 (_tconn)->pub.conn->id, \
694 (_tconn), \
695 (_tconn)->pub.conn, \
696 (_tconn)->pub.trunk->el, \
697 fr_table_str_by_value(trunk_connection_events, (_events), "<INVALID>"), \
698 (_tconn)->pub.trunk->uctx); \
699 (_tconn)->pub.trunk->in_handler = (void *)(_tconn)->pub.trunk->funcs.connection_notify; \
700 (_tconn)->pub.trunk->funcs.connection_notify((_tconn), (_tconn)->pub.conn, (_tconn)->pub.trunk->el, (_events), (_tconn)->pub.trunk->uctx); \
701 (_tconn)->pub.trunk->in_handler = _prev; \
702 } \
703} while(0)
704
705#define IN_HANDLER(_trunk) (((_trunk)->in_handler) != NULL)
706#define IN_REQUEST_MUX(_trunk) (((_trunk)->funcs.request_mux) && ((_trunk)->in_handler == (void *)(_trunk)->funcs.request_mux))
707#define IN_REQUEST_DEMUX(_trunk) (((_trunk)->funcs.request_demux) && ((_trunk)->in_handler == (void *)(_trunk)->funcs.request_demux))
708#define IN_REQUEST_CANCEL_MUX(_trunk) (((_trunk)->funcs.request_cancel_mux) && ((_trunk)->in_handler == (void *)(_trunk)->funcs.request_cancel_mux))
709
710#define IS_SERVICEABLE(_tconn) ((_tconn)->pub.state & TRUNK_CONN_SERVICEABLE)
711#define IS_PROCESSING(_tconn) ((_tconn)->pub.state & TRUNK_CONN_PROCESSING)
712
713/** Remove the current request from the backlog
714 *
715 */
716#define REQUEST_EXTRACT_BACKLOG(_treq) \
717do { \
718 int _ret; \
719 _ret = fr_heap_extract(&(_treq)->pub.trunk->backlog, _treq); \
720 if (!fr_cond_assert_msg(_ret == 0, "Failed extracting request from backlog heap: %s", fr_strerror())) break; \
721} while (0)
722
723/** Remove the current request from the pending list
724 *
725 */
726#define REQUEST_EXTRACT_PENDING(_treq) \
727do { \
728 int _ret; \
729 _ret = fr_heap_extract(&(_treq)->pub.tconn->pending, _treq); \
730 if (!fr_cond_assert_msg(_ret == 0, "Failed extracting request from pending heap: %s", fr_strerror())) break; \
731} while (0)
732
733/** Remove the current request from the partial slot
734 *
735 */
736#define REQUEST_EXTRACT_PARTIAL(_treq) \
737do { \
738 fr_assert((_treq)->pub.tconn->partial == treq); \
739 tconn->partial = NULL; \
740} while (0)
741
742/** Remove the current request from the sent list
743 *
744 */
745#define REQUEST_EXTRACT_SENT(_treq) fr_dlist_remove(&tconn->sent, treq)
746
747/** Remove the current request from the reapable list
748 *
749 */
750#define REQUEST_EXTRACT_REAPABLE(_treq) fr_dlist_remove(&tconn->reapable, treq)
751
752/** Remove the current request from the cancel list
753 *
754 */
755#define REQUEST_EXTRACT_CANCEL(_treq) fr_dlist_remove(&tconn->cancel, treq)
756
757/** Remove the current request from the cancel_partial slot
758 *
759 */
760#define REQUEST_EXTRACT_CANCEL_PARTIAL(_treq) \
761do { \
762 fr_assert((_treq)->pub.tconn->cancel_partial == treq); \
763 tconn->cancel_partial = NULL; \
764} while (0)
765
766/** Remove the current request from the cancel sent list
767 *
768 */
769#define REQUEST_EXTRACT_CANCEL_SENT(_treq) fr_dlist_remove(&tconn->cancel_sent, treq)
770
771/** Reorder the connections in the active heap
772 *
773 * fr_heap_extract will also error out if heap_id is bad - no need for assert
774 */
775#define CONN_REORDER(_tconn) \
776do { \
777 int _ret; \
778 if ((fr_minmax_heap_num_elements((_tconn)->pub.trunk->active) == 1)) break; \
779 if (!fr_cond_assert((_tconn)->pub.state == TRUNK_CONN_ACTIVE)) break; \
780 _ret = fr_minmax_heap_extract((_tconn)->pub.trunk->active, (_tconn)); \
781 if (!fr_cond_assert_msg(_ret == 0, "Failed extracting conn from active heap: %s", fr_strerror())) break; \
782 fr_minmax_heap_insert((_tconn)->pub.trunk->active, (_tconn)); \
783} while (0)
784
785/** Call a list of watch functions associated with a state
786 *
787 */
789{
790 /*
791 * Nested watcher calls are not allowed
792 * and shouldn't be possible because of
793 * deferred signal processing.
794 */
795 fr_assert(trunk->next_watcher == NULL);
796
797 while ((trunk->next_watcher = fr_dlist_next(list, trunk->next_watcher))) {
798 trunk_watch_entry_t *entry = trunk->next_watcher;
799 bool oneshot = entry->oneshot; /* Watcher could be freed, so store now */
800
801 if (!entry->enabled) continue;
802 if (oneshot) trunk->next_watcher = fr_dlist_remove(list, entry);
803
804 entry->func(trunk, trunk->pub.state, state, entry->uctx);
805
806 if (oneshot) talloc_free(entry);
807 }
808 trunk->next_watcher = NULL;
809}
810
811/** Call the state change watch functions
812 *
813 */
814#define CALL_WATCHERS(_trunk, _state) \
815do { \
816 if (fr_dlist_empty(&(_trunk)->watch[_state])) break; \
817 trunk_watch_call((_trunk), &(_trunk)->watch[_state], _state); \
818} while(0)
819
820/** Remove a watch function from a trunk state list
821 *
822 * @param[in] trunk The trunk to remove the watcher from.
823 * @param[in] state to remove the watch from.
824 * @param[in] watch Function to remove.
825 * @return
826 * - 0 if the function was removed successfully.
827 * - -1 if the function wasn't present in the watch list.
828 * - -2 if an invalid state was passed.
829 */
831{
832 trunk_watch_entry_t *entry = NULL;
833 fr_dlist_head_t *list;
834
835 if (state >= TRUNK_STATE_MAX) return -2;
836
837 list = &trunk->watch[state];
838 while ((entry = fr_dlist_next(list, entry))) {
839 if (entry->func == watch) {
840 if (trunk->next_watcher == entry) {
841 trunk->next_watcher = fr_dlist_remove(list, entry);
842 } else {
843 fr_dlist_remove(list, entry);
844 }
845 talloc_free(entry);
846 return 0;
847 }
848 }
849
850 return -1;
851}
852
853/** Add a watch entry to the trunk state list
854 *
855 * @param[in] trunk The trunk to add the watcher to.
856 * @param[in] state to watch for.
857 * @param[in] watch Function to add.
858 * @param[in] oneshot Should this watcher only be run once.
859 * @param[in] uctx Context to pass to function.
860 * @return
861 * - NULL if an invalid state is passed.
862 * - A new watch entry handle on success.
863 */
865 trunk_watch_t watch, bool oneshot, void const *uctx)
866{
867 trunk_watch_entry_t *entry;
868 fr_dlist_head_t *list;
869
870 if (state >= TRUNK_STATE_MAX) return NULL;
871
872 list = &trunk->watch[state];
873 MEM(entry = talloc_zero(trunk, trunk_watch_entry_t));
874
875 entry->func = watch;
876 entry->oneshot = oneshot;
877 entry->enabled = true;
878 memcpy(&entry->uctx, &uctx, sizeof(entry->uctx));
879 fr_dlist_insert_tail(list, entry);
880
881 return entry;
882}
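/*
 *	A minimal sketch of a watch function, shaped after the call
 *	entry->func(trunk, trunk->pub.state, state, entry->uctx) in
 *	trunk_watch_call() above.  The void return and the trunk_state_t
 *	parameter type are assumptions:
 *
 *		static void my_on_trunk_state(trunk_t *trunk, trunk_state_t prev, trunk_state_t state, void *uctx)
 *		{
 *			DEBUG3("Trunk %p changed state %s -> %s", trunk,
 *			       fr_table_str_by_value(trunk_states, prev, "<INVALID>"),
 *			       fr_table_str_by_value(trunk_states, state, "<INVALID>"));
 *		}
 */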
883
884#define TRUNK_STATE_TRANSITION(_new) \
885do { \
886 DEBUG3("Trunk changed state %s -> %s", \
887 fr_table_str_by_value(trunk_states, trunk->pub.state, "<INVALID>"), \
888 fr_table_str_by_value(trunk_states, _new, "<INVALID>")); \
889 CALL_WATCHERS(trunk, _new); \
890 trunk->pub.state = _new; \
891} while (0)
892
893static void trunk_request_enter_backlog(trunk_request_t *treq, bool new);
894static void trunk_request_enter_pending(trunk_request_t *treq, trunk_connection_t *tconn, bool new);
903
904static uint64_t trunk_requests_per_connection(uint16_t *conn_count_out, uint32_t *req_conn_out,
905 trunk_t *trunk, fr_time_t now, NDEBUG_UNUSED bool verify);
906
907static int trunk_connection_spawn(trunk_t *trunk, fr_time_t now);
908static inline void trunk_connection_auto_full(trunk_connection_t *tconn);
909static inline void trunk_connection_auto_unfull(trunk_connection_t *tconn);
910static inline void trunk_connection_readable(trunk_connection_t *tconn);
911static inline void trunk_connection_writable(trunk_connection_t *tconn);
919
920static void trunk_rebalance(trunk_t *trunk);
921static void trunk_manage(trunk_t *trunk, fr_time_t now);
922static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx);
923static void trunk_backlog_drain(trunk_t *trunk);
924
925/** Compare two protocol requests
926 *
927 * Allows protocol requests to be prioritised with a function
928 * specified by the API client. Defaults to comparison by pointer address
929 * if no function is specified.
930 *
931 * @param[in] a treq to compare to b.
932 * @param[in] b treq to compare to a.
933 * @return
934 * - +1 if a > b.
935 * - 0 if a == b.
936 * - -1 if a < b.
937 */
938static int8_t _trunk_request_prioritise(void const *a, void const *b)
939{
942
943 fr_assert(treq_a->pub.trunk == treq_b->pub.trunk);
944
945 return treq_a->pub.trunk->funcs.request_prioritise(treq_a->pub.preq, treq_b->pub.preq);
946}
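/*
 *	A minimal sketch of an API-client prioritisation callback as it's invoked
 *	above.  my_preq_t and its priority field are hypothetical:
 *
 *		static int8_t my_request_prioritise(void const *a, void const *b)
 *		{
 *			my_preq_t const *preq_a = a;
 *			my_preq_t const *preq_b = b;
 *
 *			return (preq_a->priority > preq_b->priority) - (preq_a->priority < preq_b->priority);
 *		}
 */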
947
948/** Remove a request from all connection lists
949 *
950 * A common function used by init, fail, complete state functions to disassociate
951 * a request from a connection in preparation for freeing or reassignment.
952 *
953 * Despite its unassuming name, this function is *the* place to put calls to
954 * functions which need to be called when the number of requests associated with
955 * a connection changes.
956 *
957 * Trunk requests will always be passed to this function before they're removed
958 * from a connection, even if the requests are being freed.
959 *
960 * @param[in] treq to trigger a state change for.
961 */
963{
964 trunk_connection_t *tconn = treq->pub.tconn;
965 trunk_t *trunk = treq->pub.trunk;
966
967 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
968
969 switch (treq->pub.state) {
971 return; /* Not associated with connection */
972
975 break;
976
979 break;
980
983 break;
984
987 break;
988
991 break;
992
995 break;
996
999 break;
1000
1001 default:
1002 fr_assert(0);
1003 break;
1004 }
1005
1006 /*
1007 * If the request wasn't associated with a
1008 * connection, then there's nothing more
1009 * to do.
1010 */
1011 if (!tconn) return;
1012
1013 {
1014 request_t *request = treq->pub.request;
1015
1016 ROPTIONAL(RDEBUG3, DEBUG3, "%s Trunk connection released request %" PRIu64,
1017 tconn->pub.conn->name, treq->id);
1018 }
1019 /*
1020 * Release any connection specific resources the
1021 * treq holds.
1022 */
1024
1025 switch (tconn->pub.state){
1026 case TRUNK_CONN_FULL:
1027 trunk_connection_auto_unfull(tconn); /* Check if we can switch back to active */
1028 if (tconn->pub.state == TRUNK_CONN_FULL) break; /* Only fallthrough if conn is now active */
1030
1031 case TRUNK_CONN_ACTIVE:
1032 CONN_REORDER(tconn);
1033 break;
1034
1035 default:
1036 break;
1037 }
1038
1039 treq->pub.tconn = NULL;
1040
1041 /*
1042 * Request removed from the connection
1043 * see if we need to deregister I/O events.
1044 */
1046}
1047
1048/** Transition a request to the unassigned state, in preparation for re-assignment
1049 *
1050 * @note treq->tconn may be inviable after calling
1051 * if treq->conn and connection_signals_pause are not used.
1052 * This is due to call to trunk_request_remove_from_conn.
1053 *
1054 * @param[in] treq to trigger a state change for.
1055 */
1057{
1058 trunk_t *trunk = treq->pub.trunk;
1059
1060 switch (treq->pub.state) {
1062 return;
1063
1066 break;
1067
1073 break;
1074
1075 default:
1077 }
1078
1080}
1081
1082/** Transition a request to the backlog state, adding it to the backlog of the trunk
1083 *
1084 * @note treq->tconn and treq may be inviable after calling
1085 * if treq->conn and connection_signals_pause are not used.
1086 * This is due to call to trunk_manage.
1087 *
1088 * @param[in] treq to trigger a state change for.
1089 * @param[in] new Whether this is a new request.
1090 */
1092{
1093 trunk_connection_t *tconn = treq->pub.tconn;
1094 trunk_t *trunk = treq->pub.trunk;
1095
1096 switch (treq->pub.state) {
1099 break;
1100
1103 break;
1104
1107 break;
1108
1109 default:
1111 }
1112
1114 fr_heap_insert(&trunk->backlog, treq); /* Insert into the backlog heap */
1115
1116 /*
1117 * A new request has entered the trunk.
1118 * Re-calculate request/connection ratios.
1119 */
1120 if (new) trunk_requests_per_connection(NULL, NULL, trunk, fr_time(), false);
1121
1122 /*
1123 * To reduce latency, if there are no connections
1124 * in the connecting state, call the trunk manage
1125 * function immediately.
1126 *
1127 * Likewise, if there are draining connections
1128 * which could be moved back to active, call
1129 * the trunk manage function.
1130 *
1131 * Remember, requests only enter the backlog if
1132 * there are no connections which can service them.
1133 */
1137 }
1138}
1139
1140/** Transition a request to the pending state, adding it to the backlog of an active connection
1141 *
1142 * All trunk requests being added to a connection get passed to this function.
1143 * All trunk requests being removed from a connection get passed to #trunk_request_remove_from_conn.
1144 *
1145 * @note treq->tconn and treq may be inviable after calling
1146 * if treq->conn and connection_signals_pause is not used.
1147 * This is due to call to trunk_connection_event_update.
1148 *
1149 * @param[in] treq to trigger a state change for.
1150 * @param[in] tconn to enqueue the request on.
1151 * @param[in] new Whether this is a new request.
1152 */
1154{
1155 trunk_t *trunk = treq->pub.trunk;
1156
1157 fr_assert(tconn->pub.trunk == trunk);
1158 fr_assert(IS_PROCESSING(tconn));
1159
1160 switch (treq->pub.state) {
1163 fr_assert(!treq->pub.tconn);
1164 break;
1165
1167 fr_assert(!treq->pub.tconn);
1169 break;
1170
1171 case TRUNK_REQUEST_STATE_CANCEL: /* Moved from another connection */
1173 break;
1174
1175 default:
1177 }
1178
1179 /*
1180 * Assign the new connection first so
1181 * it appears in the state log.
1182 */
1183 treq->pub.tconn = tconn;
1184
1186
1187 {
1188 request_t *request = treq->pub.request;
1189
1190 ROPTIONAL(RDEBUG, DEBUG3, "%s Trunk connection assigned request %"PRIu64,
1191 tconn->pub.conn->name, treq->id);
1192 }
1193 fr_heap_insert(&tconn->pending, treq);
1194
1195 /*
1196 * A new request has entered the trunk.
1197 * Re-calculate request/connection ratios.
1198 */
1199 if (new) trunk_requests_per_connection(NULL, NULL, trunk, fr_time(), false);
1200
1201 /*
1202 * Check if we need to automatically transition the
1203 * connection to full.
1204 */
1206
1207 /*
1208 * Reorder the connection in the heap now it has an
1209 * additional request.
1210 */
1211 if (tconn->pub.state == TRUNK_CONN_ACTIVE) CONN_REORDER(tconn);
1212
1213 /*
1214 * We have a new request, see if we need to register
1215 * for I/O events.
1216 */
1218}
1219
1220/** Transition a request to the partial state, indicating that it has been partially sent
1221 *
1222 * @param[in] treq to trigger a state change for.
1223 */
1225{
1226 trunk_connection_t *tconn = treq->pub.tconn;
1227 trunk_t *trunk = treq->pub.trunk;
1228
1229 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1230
1231 switch (treq->pub.state) {
1232 case TRUNK_REQUEST_STATE_PENDING: /* All requests go through pending, even requeued ones */
1234 break;
1235
1236 default:
1238 }
1239
1240 fr_assert(!tconn->partial);
1241 tconn->partial = treq;
1242
1244}
1245
1246/** Transition a request to the sent state, indicating that it's been sent in its entirety
1247 *
1248 * @note treq->tconn and treq may be inviable after calling
1249 * if treq->conn and connection_signals_pause is not used.
1250 * This is due to call to trunk_connection_event_update.
1251 *
1252 * @param[in] treq to trigger a state change for.
1253 */
1255{
1256 trunk_connection_t *tconn = treq->pub.tconn;
1257 trunk_t *trunk = treq->pub.trunk;
1258
1259 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1260
1261 switch (treq->pub.state) {
1264 break;
1265
1268 break;
1269
1270 default:
1272 }
1273
1275 fr_dlist_insert_tail(&tconn->sent, treq);
1276
1277 /*
1278 * Update the connection's sent stats if this is the
1279 * first time this request is being sent.
1280 */
1281 if (!treq->sent) {
1282 trunk->pub.last_write_success = fr_time();
1283
1285 tconn->sent_count++;
1286 treq->sent = true;
1287
1288 /*
1289 * Enforces max_uses
1290 */
1291 if ((trunk->conf.max_uses > 0) && (tconn->sent_count >= trunk->conf.max_uses)) {
1293 }
1294 }
1295
1296 /*
1297 * We just sent a request, we probably need
1298 * to tell the event loop we want to be
1299 * notified if there's data available.
1300 */
1302}
1303
1304/** Transition a request to the reapable state, indicating that it's been sent in its entirety, but no response is expected
1305 *
1306 * @note Largely a replica of trunk_request_enter_sent.
1307 *
1308 * @param[in] treq to trigger a state change for.
1309 */
1311{
1312 trunk_connection_t *tconn = treq->pub.tconn;
1313 trunk_t *trunk = treq->pub.trunk;
1314
1315 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1316
1317 switch (treq->pub.state) {
1320 break;
1321
1324 break;
1325
1326 default:
1328 }
1329
1331 fr_dlist_insert_tail(&tconn->reapable, treq);
1332
1333 if (!treq->sent) {
1334 tconn->sent_count++;
1335 treq->sent = true;
1336
1337 if ((trunk->conf.max_uses > 0) && (tconn->sent_count >= trunk->conf.max_uses)) {
1339 }
1340 }
1341
1343}
1344
1345/** Transition a request to the cancel state, placing it in a connection's cancellation list
1346 *
1347 * If a request_cancel_mux callback is provided, that callback will
1348 * be called periodically for requests which were cancelled due to
1349 * a signal.
1350 *
1351 * The request_cancel_mux callback will dequeue cancelled requests
1352 * and inform a remote server that the result is no longer required.
1353 *
1354 * A request must enter this state before being added to the backlog
1355 * of another connection if it's been sent or partially sent.
1356 *
1357 * @note treq->tconn and treq may be inviable after calling
1358 * if treq->conn and connection_signals_pause is not used.
1359 * This is due to call to trunk_connection_event_update.
1360 *
1361 * @param[in] treq to trigger a state change for.
1362 * @param[in] reason Why the request was cancelled.
1363 * Should be one of:
1364 * - TRUNK_CANCEL_REASON_SIGNAL request cancelled
1365 * because of a signal from the interpreter.
1366 * - TRUNK_CANCEL_REASON_MOVE request cancelled
1367 * because the connection failed and it needs
1368 * to be assigned to a new connection.
1369 * - TRUNK_CANCEL_REASON_REQUEUE request cancelled
1370 * as it needs to be resent on the same connection.
1371 */
1373{
1374 trunk_connection_t *tconn = treq->pub.tconn;
1375 trunk_t *trunk = treq->pub.trunk;
1376
1377 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1378
1379 switch (treq->pub.state) {
1382 break;
1383
1386 break;
1387
1390 break;
1391
1392 default:
1394 }
1395
1397 fr_dlist_insert_tail(&tconn->cancel, treq);
1398 treq->cancel_reason = reason;
1399
1400 DO_REQUEST_CANCEL(treq, reason);
1401
1402 /*
1403 * Our treq is no longer bound to an actual
1404 * request_t *, as we can't guarantee the
1405 * lifetime of the original request_t *.
1406 */
1407 if (treq->cancel_reason == TRUNK_CANCEL_REASON_SIGNAL) treq->pub.request = NULL;
1408
1409 /*
1410 * Register for I/O write events if we need to.
1411 */
1413}
1414
1415/** Transition a request to the cancel_partial state, placing it in a connection's cancel_partial slot
1416 *
1417 * The request_demux function is then responsible for signalling
1418 * that the cancel request is complete when the remote server
1419 * acknowledges the cancellation request.
1420 *
1421 * @param[in] treq to trigger a state change for.
1422 */
1424{
1425 trunk_connection_t *tconn = treq->pub.tconn;
1426 trunk_t *trunk = treq->pub.trunk;
1427
1428 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1431
1432 switch (treq->pub.state) {
1433 case TRUNK_REQUEST_STATE_CANCEL: /* The only valid state cancel_sent can be reached from */
1435 break;
1436
1437 default:
1439 }
1440
1442 fr_assert(!tconn->partial);
1443 tconn->cancel_partial = treq;
1444}
1445
1446/** Transition a request to the cancel_sent state, placing it in a connection's cancel_sent list
1447 *
1448 * The request_demux function is then responsible for signalling
1449 * that the cancel request is complete when the remote server
1450 * acknowledges the cancellation request.
1451 *
1452 * @note treq->tconn and treq may be inviable after calling
1453 * if treq->conn and connection_signals_pause is not used.
1454 * This is due to call to trunk_connection_event_update.
1455 *
1456 * @param[in] treq to trigger a state change for.
1457 */
1459{
1460 trunk_connection_t *tconn = treq->pub.tconn;
1461 trunk_t *trunk = treq->pub.trunk;
1462
1463 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1466
1467 switch (treq->pub.state) {
1470 break;
1471
1474 break;
1475
1476 default:
1478 }
1479
1481 fr_dlist_insert_tail(&tconn->cancel_sent, treq);
1482
1483 /*
1484 * De-register for I/O write events
1485 * and register the read events
1486 * to drain the cancel ACKs.
1487 */
1489}
1490
1491/** Cancellation was acked, the request is complete, free it
1492 *
1493 * The API client will not be informed, as the original request_t *
1494 * will likely have been freed by this point.
1495 *
1496 * @note treq will be inviable after a call to this function.
1497 * treq->tconn may be inviable after calling
1498 * if treq->conn and connection_signals_pause is not used.
1499 * This is due to call to trunk_request_remove_from_conn.
1500 *
1501 * @param[in] treq to mark as complete.
1502 */
1504{
1505 trunk_connection_t *tconn = treq->pub.tconn;
1506 trunk_t *trunk = treq->pub.trunk;
1507
1508 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1509 if (!fr_cond_assert(!treq->pub.request)) return; /* Only a valid state for request_t * which have been cancelled */
1510
1511 switch (treq->pub.state) {
1514 break;
1515
1516 default:
1518 }
1519
1521
1523 trunk_request_free(&treq); /* Free the request */
1524}
1525
1526/** Request completed successfully, inform the API client and free the request
1527 *
1528 * @note treq will be inviable after a call to this function.
1529 * treq->tconn may also be inviable due to call to
1530 * trunk_request_remove_from_conn.
1531 *
1532 * @param[in] treq to mark as complete.
1533 */
1535{
1536 trunk_connection_t *tconn = treq->pub.tconn;
1537 trunk_t *trunk = treq->pub.trunk;
1538
1539 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1540
1541 switch (treq->pub.state) {
1546 break;
1547
1548 default:
1550 }
1551
1553 DO_REQUEST_COMPLETE(treq);
1554 trunk_request_free(&treq); /* Free the request */
1555}
1556
1557/** Request failed, inform the API client and free the request
1558 *
1559 * @note treq will be inviable after a call to this function.
1560 * treq->tconn may also be inviable due to call to
1561 * trunk_request_remove_from_conn.
1562 *
1563 * @param[in] treq to mark as failed.
1564 */
1566{
1567 trunk_connection_t *tconn = treq->pub.tconn;
1568 trunk_t *trunk = treq->pub.trunk;
1569 trunk_request_state_t prev = treq->pub.state;
1570
1571 if (!fr_cond_assert(!tconn || (tconn->pub.trunk == trunk))) return;
1572
1573 switch (treq->pub.state) {
1576 break;
1577
1578 default:
1580 break;
1581 }
1582
1584 DO_REQUEST_FAIL(treq, prev);
1585 trunk_request_free(&treq); /* Free the request */
1586}
1587
1588/** Check to see if a trunk request can be enqueued
1589 *
1590 * @param[out] tconn_out Connection the request may be enqueued on.
1591 * @param[in] trunk To enqueue requests on.
1592 * @param[in] request associated with the treq (if any).
1593 * @return
1594 * - TRUNK_ENQUEUE_OK caller should enqueue request on provided tconn.
1595 * - TRUNK_ENQUEUE_IN_BACKLOG Request should be queued in the backlog.
1596 * - TRUNK_ENQUEUE_NO_CAPACITY Unable to enqueue request as we have no spare
1597 * connections or backlog space.
1598 * - TRUNK_ENQUEUE_DST_UNAVAILABLE Can't enqueue because the destination is
1599 * unreachable.
1600 */
1602 request_t *request)
1603{
1604 trunk_connection_t *tconn;
1605 /*
1606 * If we have an active connection then
1607 * return that.
1608 */
1609 tconn = fr_minmax_heap_min_peek(trunk->active);
1610 if (tconn) {
1611 *tconn_out = tconn;
1612 return TRUNK_ENQUEUE_OK;
1613 }
1614
1615 /*
1616 * Unlike the connection pool, we don't need
1617 * to drive any internal processes by feeding
1618 * it requests.
1619 *
1620 * If the last event to occur was a failure
1621 * we refuse to enqueue new requests until
1622 * one or more connections comes online.
1623 */
1624 if (!trunk->conf.backlog_on_failed_conn &&
1625 fr_time_gt(trunk->pub.last_failed, fr_time_wrap(0)) &&
1626 fr_time_lt(trunk->pub.last_connected, trunk->pub.last_failed)) {
1628 RWARN, WARN, "Refusing to enqueue requests - "
1629 "No active connections and last event was a connection failure");
1630
1632 }
1633
1634
1635 /*
1636 * Only enforce if we're limiting maximum
1637 * number of connections, and maximum
1638 * number of requests per connection.
1639 *
1640 * The alloc function also checks this
1641 * which is why this is only done for
1642 * debug builds.
1643 */
1644 if (trunk->conf.max_req_per_conn && trunk->conf.max) {
1645 uint64_t limit;
1646
1647 limit = trunk->conf.max * (uint64_t)trunk->conf.max_req_per_conn;
1648 if (limit > 0) {
1649 uint64_t total_reqs;
1650
1651 total_reqs = trunk_request_count_by_state(trunk, TRUNK_CONN_ALL,
1653 if (total_reqs >= (limit + trunk->conf.max_backlog)) {
1655 RWARN, WARN, "Refusing to alloc requests - "
1656 "Limit of %"PRIu64" (max = %u * per_connection_max = %u) "
1657 "plus %u backlog requests reached",
1658 limit, trunk->conf.max, trunk->conf.max_req_per_conn,
1659 trunk->conf.max_backlog);
1661 }
1662 }
1663 }
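	/*
	 *	With the default tunables above (max = 5, per_connection_max = 2000,
	 *	max_backlog = 1000) this check refuses to allocate new requests once
	 *	11000 requests are in flight.
	 */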
1664
1666}
1667
1668/** Enqueue a request which has never been assigned to a connection or was previously cancelled
1669 *
1670 * @param[in] treq to re-enqueue. Must have been removed
1671 * from its existing connection with
1672 * #trunk_connection_requests_dequeue.
1673 * @return
1674 * - TRUNK_ENQUEUE_OK Request was re-enqueued.
1675 * - TRUNK_ENQUEUE_NO_CAPACITY Request enqueueing failed because we're at capacity.
1676 * - TRUNK_ENQUEUE_DST_UNAVAILABLE Enqueuing failed for some reason.
1677 * Usually because the connection to the resource is down.
1678 */
1680{
1681 trunk_t *trunk = treq->pub.trunk;
1682 trunk_connection_t *tconn = NULL;
1683 trunk_enqueue_t ret;
1684
1685 /*
1686 * Must *NOT* still be assigned to another connection
1687 */
1688 fr_assert(!treq->pub.tconn);
1689
1690 ret = trunk_request_check_enqueue(&tconn, trunk, treq->pub.request);
1691 switch (ret) {
1692 case TRUNK_ENQUEUE_OK:
1693 if (trunk->conf.always_writable) {
1695 trunk_request_enter_pending(treq, tconn, false);
1698 } else {
1699 trunk_request_enter_pending(treq, tconn, false);
1700 }
1701 break;
1702
1704 /*
1705 * No more connections and request
1706 * is already in the backlog.
1707 *
1708 * Signal our caller it should stop
1709 * trying to drain the backlog.
1710 */
1712 trunk_request_enter_backlog(treq, false);
1713 break;
1714
1715 default:
1716 break;
1717 }
1718
1719 return ret;
1720}
1721
1722/** Shift requests in the specified states onto new connections
1723 *
1724 * This function will blindly dequeue any requests in the specified state and get
1725 * them back to the unassigned state, cancelling any sent or partially sent requests.
1726 *
1727 * This function does not check that dequeuing a request in a particular state is a
1728 * sane or sensible thing to do, that's up to the caller!
1729 *
1730 * @param[out] out A list to insert the newly dequeued and unassigned
1731 * requests into.
1732 * @param[in] tconn to dequeue requests from.
1733 * @param[in] states Dequeue requests in these states.
1734 * @param[in] max The maximum number of requests to dequeue. 0 for unlimited.
1735 */
1737 int states, uint64_t max)
1738{
1739 trunk_request_t *treq;
1740 uint64_t count = 0;
1741
1742 if (max == 0) max = UINT64_MAX;
1743
1744#define OVER_MAX_CHECK if (++count > max) return (count - 1)
1745
1746#define DEQUEUE_ALL(_src_list, _state) do { \
1747 while ((treq = fr_dlist_head(_src_list))) { \
1748 OVER_MAX_CHECK; \
1749 fr_assert(treq->pub.state == (_state)); \
1750 trunk_request_enter_unassigned(treq); \
1751 fr_dlist_insert_tail(out, treq); \
1752 } } while (0)
1753
1754 /*
1755 * Don't need to do anything with
1756 * cancellation requests.
1757 */
1758 if (states & TRUNK_REQUEST_STATE_CANCEL) DEQUEUE_ALL(&tconn->cancel,
1760
1761 /*
1762 * ...same with cancel inform
1763 */
1766
1767 /*
1768 * ....same with cancel partial
1769 */
1772 treq = tconn->cancel_partial;
1773 if (treq) {
1777 }
1778 }
1779
1780 /*
1781 * ...and pending.
1782 */
1783 if (states & TRUNK_REQUEST_STATE_PENDING) {
1784 while ((treq = fr_heap_peek(tconn->pending))) {
1789 }
1790 }
1791
1792 /*
1793 * Cancel partially sent requests
1794 */
1795 if (states & TRUNK_REQUEST_STATE_PARTIAL) {
1797 treq = tconn->partial;
1798 if (treq) {
1800
1801 /*
1802 * Don't allow the connection to change state whilst
1803 * we're draining requests from it.
1804 */
1810 }
1811 }
1812
1813 /*
1814 * Cancel sent requests
1815 */
1816 if (states & TRUNK_REQUEST_STATE_SENT) {
1817 /*
1818 * Don't allow the connection to change state whilst
1819 * we're draining requests from it.
1820 */
1822 while ((treq = fr_dlist_head(&tconn->sent))) {
1825
1829 }
1831 }
1832
1833 return count;
1834}
1835
1836/** Remove requests in specified states from a connection, attempting to distribute them to new connections
1837 *
1838 * @param[in] tconn To remove requests from.
1839 * @param[in] states One or more states or'd together.
1840 * @param[in] max The maximum number of requests to dequeue.
1841 * 0 for unlimited.
1842 * @param[in] fail_bound If true causes any requests bound to the connection to fail.
1843 * If false bound requests will not be moved.
1844 *
1845 * @return the number of requests re-queued.
1846 */
1847static uint64_t trunk_connection_requests_requeue_priv(trunk_connection_t *tconn, int states, uint64_t max, bool fail_bound)
1848{
1849 trunk_t *trunk = tconn->pub.trunk;
1850 fr_dlist_head_t to_process;
1851 trunk_request_t *treq = NULL;
1852 uint64_t moved = 0;
1853
1854 if (max == 0) max = UINT64_MAX;
1855
1856 fr_dlist_talloc_init(&to_process, trunk_request_t, entry);
1857
1858 /*
1859 * Prevent the connection changing state whilst we're
1860 * working with it.
1861 *
1862 * There's a user callback that can be called by
1863 * trunk_request_enqueue_existing which can reconnect
1864 * the connection.
1865 */
1867
1868 /*
1869 * Remove non-cancelled requests from the connection
1870 */
1871 moved += trunk_connection_requests_dequeue(&to_process, tconn, states & ~TRUNK_REQUEST_STATE_CANCEL_ALL, max);
1872
1873 /*
1874 * Prevent requests being requeued on the same trunk
1875 * connection, which would break rebalancing.
1876 *
1877 * This is a bit of a hack, but nothing should test
1878 * for connection/list consistency in this code,
1879 * and if something is added later, it'll be flagged
1880 * by the tests.
1881 */
1882 if (tconn->pub.state == TRUNK_CONN_ACTIVE) {
1883 int ret;
1884
1885 ret = fr_minmax_heap_extract(trunk->active, tconn);
1886 if (!fr_cond_assert_msg(ret == 0,
1887 "Failed extracting conn from active heap: %s", fr_strerror())) goto done;
1888
1889 }
1890
1891 /*
1892 * Loop over all the requests we gathered and
1893 * redistribute them to new connections.
1894 */
1895 while ((treq = fr_dlist_next(&to_process, treq))) {
1896 trunk_request_t *prev;
1897
1898 prev = fr_dlist_remove(&to_process, treq);
1899
1900 /*
1901 * Attempts to re-queue a request
1902 * that's bound to a connection
1903 * results in a failure.
1904 */
1905 if (treq->bound_to_conn) {
1906 if (fail_bound || !IS_SERVICEABLE(tconn)) {
1908 } else {
1909 trunk_request_enter_pending(treq, tconn, false);
1910 }
1911 goto next;
1912 }
1913
1914 switch (trunk_request_enqueue_existing(treq)) {
1915 case TRUNK_ENQUEUE_OK:
1916 break;
1917
1918 /*
1919 * A connection failed, and
1920 * there's no other connections
1921 * available to deal with the
1922 * load, it's been placed back
1923 * in the backlog.
1924 */
1926 break;
1927
1928 /*
1929 * If we fail to re-enqueue then
1930 * there's nothing to do except
1931 * fail the request.
1932 */
1935 case TRUNK_ENQUEUE_FAIL:
1937 break;
1938 }
1939 next:
1940 treq = prev;
1941 }
1942
1943 /*
1944 * Add the connection back into the active list
1945 */
1946 if (tconn->pub.state == TRUNK_CONN_ACTIVE) {
1947 int ret;
1948
1949 ret = fr_minmax_heap_insert(trunk->active, tconn);
1950 if (!fr_cond_assert_msg(ret == 0,
1951 "Failed re-inserting conn into active heap: %s", fr_strerror())) goto done;
1952 }
1953 if (moved >= max) goto done;
1954
1955 /*
1956 * Deal with the cancelled requests specially; we can't
1957 * queue them up again as they were only valid on that
1958 * specific connection.
1959 *
1960 * We just need to run them to completion which, as
1961 * they should already be in the unassigned state,
1962 * just means freeing them.
1963 */
1964 moved += trunk_connection_requests_dequeue(&to_process, tconn,
1965 states & TRUNK_REQUEST_STATE_CANCEL_ALL, max - moved);
1966 while ((treq = fr_dlist_next(&to_process, treq))) {
1967 trunk_request_t *prev;
1968
1969 prev = fr_dlist_remove(&to_process, treq);
1970 trunk_request_free(&treq);
1971 treq = prev;
1972 }
1973
1974done:
1975
1976 /*
1977 * Always re-calculate the request/connection
1978 * ratio at the end.
1979 *
1980 * This avoids having the state transition
1981 * functions do it.
1982 *
1983 * The ratio would be wrong when they calculated
1984 * it anyway, because a bunch of requests are
1985 * dequeued from the connection and temporarily
1986 * cease to exist from the perspective of the
1987 * trunk_requests_per_connection code.
1988 */
1989 trunk_requests_per_connection(NULL, NULL, trunk, fr_time(), false);
1990
1992 return moved;
1993}
1994
1995/** Move requests off of a connection and requeue elsewhere
1996 *
1997 * @note We don't re-queue on draining or draining to free, as requests should have already been
1998 * moved off of the connection. It's also dangerous as the trunk management code may
1999 * clean up a connection in this state when the requeue is run, and then the caller
2000 * may try to access a now freed connection.
2001 *
2002 * @param[in] tconn to move requests off of.
2003 * @param[in] states Only move requests in this state.
2004 * @param[in] max The maximum number of requests to dequeue. 0 for unlimited.
2005 * @param[in] fail_bound If true causes any requests bound to the connection to fail.
2006 * If false bound requests will not be moved.
2007 * @return The number of requests requeued.
2008 */
2009uint64_t trunk_connection_requests_requeue(trunk_connection_t *tconn, int states, uint64_t max, bool fail_bound)
2010{
2011 switch (tconn->pub.state) {
2012 case TRUNK_CONN_ACTIVE:
2013 case TRUNK_CONN_FULL:
2015 return trunk_connection_requests_requeue_priv(tconn, states, max, fail_bound);
2016
2017 default:
2018 return 0;
2019 }
2020}
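/* A minimal usage sketch added by the editor (not part of the original source).
 * If an API client determines a connection has become unusable, it can move
 * everything off of it in one call, failing any requests bound to that connection.
 * The call below uses only the function and state mask defined in this file:
 *
 *	(void) trunk_connection_requests_requeue(tconn, TRUNK_REQUEST_STATE_ALL, 0, true);
 */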
2021
2022/** Signal a partial write
2023 *
2024 * Where there's high load, and the outbound write buffer is full
2025 *
2026 * @param[in] treq to signal state change for.
2027 */
2029{
2030 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2031
2033 "%s can only be called from within request_mux handler", __FUNCTION__)) return;
2034
2035 switch (treq->pub.state) {
2038 break;
2039
2040 default:
2041 return;
2042 }
2043}
2044
2045/** Signal that the request was written to a connection successfully
2046 *
2047 * @param[in] treq to signal state change for.
2048 */
2050{
2051 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2052
2054 "%s can only be called from within request_mux handler", __FUNCTION__)) return;
2055
2056 switch (treq->pub.state) {
2060 break;
2061
2062 default:
2063 return;
2064 }
2065}
2066
2067/** Signal that the request was written to a connection successfully, but no response is expected
2068 *
2069 * @param[in] treq to signal state change for.
2070 */
2072{
2073 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2074
2076 "%s can only be called from within request_mux handler", __FUNCTION__)) return;
2077
2078 switch (treq->pub.state) {
2082 break;
2083
2084 default:
2085 return;
2086 }
2087}
2088
2089/** Signal that a trunk request is complete
2090 *
2091 * The API client will be informed that the request is now complete.
2092 */
2094{
2095 trunk_t *trunk = treq->pub.trunk;
2096
2097 if (!fr_cond_assert_msg(trunk, "treq not associated with trunk")) return;
2098
2099 /*
2100 * We assume that if the request is being signalled
2101 * as complete from the demux function, that it was
2102 * a successful read.
2103 *
2104 * If this assumption turns out to be incorrect
2105 * then we need to add an argument to signal_complete
2106 * to indicate if this is a successful read.
2107 */
2108 if (IN_REQUEST_DEMUX(trunk)) {
2109 trunk_connection_t *tconn = treq->pub.tconn;
2110
2111 trunk->pub.last_read_success = fr_time();
2113 }
2114
2115 switch (treq->pub.state) {
2117 case TRUNK_REQUEST_STATE_PENDING: /* Got immediate response, i.e. cached */
2120 break;
2121
2122 default:
2123 return;
2124 }
2125}
2126
2127/** Signal that a trunk request failed
2128 *
2129 * The API client will be informed that the request has failed.
2130 */
2132{
2133 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2134
2136}
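/* Editor's sketch (not part of the original source) of how a request_demux handler
 * would typically use the two signals above.  The callback signature and the
 * my_read_and_match_response()/my_response_ok() helpers are assumptions; only the
 * signal functions and the treq->pub.rctx field come from this file:
 *
 *	static void my_request_demux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn,
 *				     connection_t *conn, UNUSED void *uctx)
 *	{
 *		trunk_request_t *treq;
 *
 *		while ((treq = my_read_and_match_response(tconn, conn))) {	// decode + match to the original treq
 *			if (my_response_ok(treq->pub.rctx)) {
 *				trunk_request_signal_complete(treq);	// runs the API client's complete callback
 *			} else {
 *				trunk_request_signal_fail(treq);	// runs the API client's fail callback
 *			}
 *		}
 *	}
 */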
2137
2138/** Cancel a trunk request
2139 *
2140 * treq can be in any state, but cancellation requests will be ignored unless the treq is in
2141 * the TRUNK_REQUEST_STATE_PARTIAL or TRUNK_REQUEST_STATE_SENT state.
2142 *
2143 * The complete or failed callbacks will not be called here, as it's assumed the request_t *
2144 * is no longer viable as it's being cancelled.
2145 *
2146 * The free function, however, is called, and that should be used to perform any necessary
2147 * cleanup.
2148 *
2149 * @param[in] treq to signal state change for.
2150 */
2152{
2153 trunk_t *trunk;
2154
2155 /*
2156 * Ensure treq hasn't been freed
2157 */
2158 (void)talloc_get_type_abort(treq, trunk_request_t);
2159
2160 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2161
2163 "%s cannot be called within a handler", __FUNCTION__)) return;
2164
2165 trunk = treq->pub.trunk;
2166
2167 switch (treq->pub.state) {
2168 /*
2169 * We don't call the complete or failed callbacks
2170 * as the request and rctx are no longer viable.
2171 */
2174 {
2175 trunk_connection_t *tconn = treq->pub.tconn;
2176
2177 /*
2178 * Don't allow connection state changes
2179 */
2183 "Bad state %s after cancellation",
2184 fr_table_str_by_value(trunk_request_states, treq->pub.state, "<INVALID>"))) {
2186 return;
2187 }
2188 /*
2189 * No cancel muxer. We're done.
2190 *
2191 * If we do have a cancel mux function,
2192 * the next time this connection becomes
2193 * writable, we'll call the cancel mux
2194 * function.
2195 *
2196 * We don't run the complete or failed
2197 * callbacks here as the request is
2198 * being cancelled.
2199 */
2200 if (!trunk->funcs.request_cancel_mux) {
2202 trunk_request_free(&treq);
2203 }
2205 }
2206 break;
2207
2208 /*
2209 * We're already in the process of cancelling a
2210 * request, so ignore duplicate signals.
2211 */
2216 break;
2217
2218 /*
2219 * For any other state, we just release the request
2220 * from its current connection and free it.
2221 */
2222 default:
2224 trunk_request_free(&treq);
2225 break;
2226 }
2227}
2228
2229/** Signal a partial cancel write
2230 *
2231 * Where there's high load, and the outbound write buffer is full
2232 *
2233 * @param[in] treq to signal state change for.
2234 */
2236{
2237 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2238
2240 "%s can only be called from within request_cancel_mux handler", __FUNCTION__)) return;
2241
2242 switch (treq->pub.state) {
2245 break;
2246
2247 default:
2248 return;
2249 }
2250}
2251
2252/** Signal that a remote server has been notified of the cancellation
2253 *
2254 * Called from request_cancel_mux to indicate that the datastore has been informed
2255 * that the response is no longer needed.
2256 *
2257 * @param[in] treq to signal state change for.
2258 */
2260{
2261 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2262
2264 "%s can only be called from within request_cancel_mux handler", __FUNCTION__)) return;
2265
2266 switch (treq->pub.state) {
2270 break;
2271
2272 default:
2273 break;
2274 }
2275}
2276
2277/** Signal that a remote server acked our cancellation
2278 *
2279 * Called from request_demux to indicate that it got an ack for the cancellation.
2280 *
2281 * @param[in] treq to signal state change for.
2282 */
2284{
2285 if (!fr_cond_assert_msg(treq->pub.trunk, "treq not associated with trunk")) return;
2286
2288 "%s can only be called from within request_demux or request_cancel_mux handlers",
2289 __FUNCTION__)) return;
2290
2291 switch (treq->pub.state) {
2293 /*
2294 * This is allowed, as we may not need to wait
2295 * for the database to ACK our cancellation
2296 * request.
2297 *
2298 * Note: TRUNK_REQUEST_STATE_CANCEL_PARTIAL
2299 * is not allowed here, as that'd mean we'd half
2300 * written the cancellation request out to the
2301 * socket, and then decided to abandon it.
2302 *
2303 * That'd leave the socket in an unusable state.
2304 */
2307 break;
2308
2309 default:
2310 break;
2311 }
2312}
2313
2314/** If the trunk request is freed then update the target requests
2315 *
2316 * gperftools showed calling the request free function directly was slightly faster
2317 * than using talloc_free.
2318 *
2319 * @param[in] treq_to_free request.
2320 */
2322{
2323 trunk_request_t *treq = *treq_to_free;
2324 trunk_t *trunk = treq->pub.trunk;
2325
2326 if (unlikely(!treq)) return;
2327
2328 /*
2329 * The only valid states a trunk request can be
2330 * freed from.
2331 */
2332 switch (treq->pub.state) {
2338 break;
2339
2340 default:
2341 if (!fr_cond_assert(0)) return;
2342 }
2343
2344 /*
2345 * Zero out the pointer to prevent double frees
2346 */
2347 *treq_to_free = NULL;
2348
2349 /*
2350 * Call the API client callback to free
2351 * any associated memory.
2352 */
2353 DO_REQUEST_FREE(treq);
2354
2355 /*
2356 * Update the last above/below target stats
2357 * We only do this when we alloc or free
2358 * connections, or on connection
2359 * state changes.
2360 */
2361 trunk_requests_per_connection(NULL, NULL, treq->pub.trunk, fr_time(), false);
2362
2363 /*
2364 * This tracks the total number of requests
2365 * allocated and not freed or returned to
2366 * the free list.
2367 */
2368 if (fr_cond_assert(trunk->pub.req_alloc > 0)) trunk->pub.req_alloc--;
2369
2370 /*
2371 * No cleanup delay, means cleanup immediately
2372 */
2375
2376#ifndef NDEBUG
2377 /*
2378 * Ensure anything parented off the treq
2379 * is freed. We do this to trigger
2380 * the destructors for the log entries.
2381 */
2382 talloc_free_children(treq);
2383
2384 /*
2385 * State log should now be empty as entries
2386 * remove themselves from the dlist
2387 * on free.
2388 */
2390 "Should have 0 remaining log entries, have %u", fr_dlist_num_elements(&treq->log));
2391#endif
2392
2393 talloc_free(treq);
2394 return;
2395 }
2396
2397 /*
2398 * Ensure anything parented off the treq
2399 * is freed.
2400 */
2401 talloc_free_children(treq);
2402
2403#ifndef NDEBUG
2404 /*
2405 * State log should now be empty as entries
2406 * remove themselves from the dlist
2407 * on free.
2408 */
2410 "Should have 0 remaining log entries, have %u", fr_dlist_num_elements(&treq->log));
2411#endif
2412
2413 /*
2414 *
2415 * Return the trunk request back to the init state.
2416 */
2417 *treq = (trunk_request_t){
2418 .pub = {
2420 .trunk = treq->pub.trunk,
2421 },
2422 .cancel_reason = TRUNK_CANCEL_REASON_NONE,
2423 .last_freed = fr_time(),
2424#ifndef NDEBUG
2425 .log = treq->log /* Keep the list head, to save reinitialisation */
2426#endif
2427 };
2428
2429 /*
2430 * Insert at the head, so that we can free
2431 * requests that have been unused for N
2432 * seconds from the tail.
2433 */
2434 fr_dlist_insert_tail(&trunk->free_requests, treq);
2435}
2436
2437/** Actually free the trunk request
2438 *
2439 */
2441{
2442 trunk_t *trunk = treq->pub.trunk;
2443
2444 switch (treq->pub.state) {
2447 break;
2448
2449 default:
2450 fr_assert(0);
2451 break;
2452 }
2453
2454 fr_dlist_remove(&trunk->free_requests, treq);
2455
2456 return 0;
2457}
2458
2459/** (Pre-)Allocate a new trunk request
2460 *
2461 * If trunk->conf.req_pool_headers or trunk->conf.req_pool_size are not zero then the
2462 * request will be a talloc pool, which can be used to hold the preq.
2463 *
2464 * @note Do not use MEM to check the result of this allocation as it may fail for
2465 * non-fatal reasons.
2466 *
2467 * @param[in] trunk to add request to.
2468 * @param[in] request to wrap in a trunk request (treq).
2469 * @return
2470 * - A newly allocated request.
2471 * - NULL if too many requests are allocated.
2472 */
2474{
2475 trunk_request_t *treq;
2476
2477 /*
2478 * The number of treqs currently allocated
2479 * exceeds the maximum number allowed.
2480 */
2481 if (trunk->conf.max_req_per_conn && trunk->conf.max) {
2482 uint64_t limit;
2483
2484 limit = (uint64_t) trunk->conf.max_req_per_conn * trunk->conf.max;
2485 if (trunk->pub.req_alloc >= (limit + trunk->conf.max_backlog)) {
2487 RWARN, WARN, "Refusing to alloc requests - "
2488 "Limit of %"PRIu64" (max = %u * per_connection_max = %u) "
2489 "plus %u backlog requests reached",
2490 limit, trunk->conf.max, trunk->conf.max_req_per_conn,
2491 trunk->conf.max_backlog);
2492 return NULL;
2493 }
2494 }
2495
2496 /*
2497 * Allocate or reuse an existing request
2498 */
2499 treq = fr_dlist_head(&trunk->free_requests);
2500 if (treq) {
2501 fr_dlist_remove(&trunk->free_requests, treq);
2503 fr_assert(treq->pub.trunk == trunk);
2504 fr_assert(treq->pub.tconn == NULL);
2507 trunk->pub.req_alloc_reused++;
2508 } else {
2510 trunk->conf.req_pool_headers, trunk->conf.req_pool_size));
2511 talloc_set_destructor(treq, _trunk_request_free);
2512
2513 *treq = (trunk_request_t){
2514 .pub = {
2516 .trunk = trunk
2517 },
2518 .cancel_reason = TRUNK_CANCEL_REASON_NONE
2519 };
2520 trunk->pub.req_alloc_new++;
2521#ifndef NDEBUG
2523#endif
2524 }
2525
2526 trunk->pub.req_alloc++;
2528 /* heap_id - initialised when treq inserted into pending */
2529 /* list - empty */
2530 /* preq - populated later */
2531 /* rctx - populated later */
2532 treq->pub.request = request;
2533
2534 return treq;
2535}
2536
2537/** Enqueue a request that needs data written to the trunk
2538 *
2539 * When a request_t * needs to make an asynchronous request to an external datastore
2540 * it should call this function, specifying a preq (protocol request) containing
2541 * the data necessary to request information from the external datastore, and an
2542 * rctx (resume ctx) used to hold the decoded response and/or any error codes.
2543 *
2544 * After a treq is successfully enqueued it will either be assigned immediately
2545 * to the pending queue of a connection, or if no connections are available,
2546 * (depending on the trunk configuration) the treq will be placed in the trunk's
2547 * global backlog.
2548 *
2549 * After receiving a positive return code from this function the caller should
2550 * immediately yield, to allow the various timers and I/O handlers that drive tconn
2551 * (trunk connection) and treq state changes to be called.
2552 *
2553 * When a tconn becomes writable (or the trunk is configured to be always writable)
2554 * the #trunk_request_mux_t callback will be called to dequeue, encode and
2555 * send any pending requests for that tconn. The #trunk_request_mux_t callback
2556 * is also responsible for tracking the outbound requests to allow the
2557 * #trunk_request_demux_t callback to match inbound responses with the original
2558 * treq. Once the #trunk_request_mux_t callback is done processing the treq
2559 * it signals what state the treq should enter next using one of the
2560 * trunk_request_signal_* functions.
2561 *
2562 * When a tconn becomes readable the user specified #trunk_request_demux_t
2563 * callback is called to process any responses, match them with the original treq,
2564 * and signal what state they should enter next using one of the
2565 * trunk_request_signal_* functions.
2566 *
2567 * @param[in,out] treq_out A trunk request handle. If the memory pointed to
2568 * is NULL, a new treq will be allocated.
2569 * Otherwise treq should point to memory allocated
2570 * with trunk_request_alloc.
2571 * @param[in] trunk to enqueue request on.
2572 * @param[in] request to enqueue.
2573 * @param[in] preq Protocol request to write out. Will be freed when
2574 * treq is freed. Should ideally be parented by the
2575 * treq if possible.
2576 * Use #trunk_request_alloc for pre-allocation of
2577 * the treq.
2578 * @param[in] rctx The resume context to write any result to.
2579 * @return
2580 * - TRUNK_ENQUEUE_OK.
2581 * - TRUNK_ENQUEUE_IN_BACKLOG.
2582 * - TRUNK_ENQUEUE_NO_CAPACITY.
2583 * - TRUNK_ENQUEUE_DST_UNAVAILABLE
2584 * - TRUNK_ENQUEUE_FAIL
2585 */
2587 request_t *request, void *preq, void *rctx)
2588{
2589 trunk_connection_t *tconn = NULL;
2590 trunk_request_t *treq;
2591 trunk_enqueue_t ret;
2592
2593 if (!fr_cond_assert_msg(!IN_HANDLER(trunk),
2594 "%s cannot be called within a handler", __FUNCTION__)) return TRUNK_ENQUEUE_FAIL;
2595
2596 if (!fr_cond_assert_msg(!*treq_out || ((*treq_out)->pub.state == TRUNK_REQUEST_STATE_INIT),
2597 "%s requests must be in \"init\" state", __FUNCTION__)) return TRUNK_ENQUEUE_FAIL;
2598
2599 /*
2600 * If delay_start was set, we may need
2601 * to insert the timer for the connection manager.
2602 */
2603 if (unlikely(!trunk->started)) {
2604 if (trunk_start(trunk) < 0) return TRUNK_ENQUEUE_FAIL;
2605 }
2606
2607 ret = trunk_request_check_enqueue(&tconn, trunk, request);
2608 switch (ret) {
2609 case TRUNK_ENQUEUE_OK:
2610 if (*treq_out) {
2611 treq = *treq_out;
2612 } else {
2613 *treq_out = treq = trunk_request_alloc(trunk, request);
2614 if (!treq) return TRUNK_ENQUEUE_FAIL;
2615 }
2616 treq->pub.preq = preq;
2617 treq->pub.rctx = rctx;
2618 if (trunk->conf.always_writable) {
2620 trunk_request_enter_pending(treq, tconn, true);
2623 } else {
2624 trunk_request_enter_pending(treq, tconn, true);
2625 }
2626 break;
2627
2629 if (*treq_out) {
2630 treq = *treq_out;
2631 } else {
2632 *treq_out = treq = trunk_request_alloc(trunk, request);
2633 if (!treq) return TRUNK_ENQUEUE_FAIL;
2634 }
2635 treq->pub.preq = preq;
2636 treq->pub.rctx = rctx;
2637 trunk_request_enter_backlog(treq, true);
2638 break;
2639
2640 default:
2641 /*
2642 * If a trunk request was provided
2643 * populate the preq and rctx fields
2644 * so that if it's freed with
2645 * trunk_request_free, the free
2646 * function works as intended.
2647 */
2648 if (*treq_out) {
2649 treq = *treq_out;
2650 treq->pub.preq = preq;
2651 treq->pub.rctx = rctx;
2652 }
2653 return ret;
2654 }
2655
2656 return ret;
2657}
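/* Editor's usage sketch (not part of the original source).  The my_preq_t/my_rctx_t
 * types and the my_module_*() helpers are assumptions; the allocation/enqueue pattern,
 * the parenting advice, and the return codes follow the documentation above, and the
 * enqueue function defined above is assumed to be trunk_request_enqueue():
 *
 *	trunk_request_t	*treq = NULL;
 *	my_preq_t	*preq;
 *	my_rctx_t	*rctx;
 *
 *	treq = trunk_request_alloc(trunk, request);
 *	if (!treq) return my_module_fail(request);		// hypothetical failure path
 *
 *	preq = talloc_zero(treq, my_preq_t);			// parented by the treq, as recommended above
 *	rctx = talloc_zero(treq, my_rctx_t);
 *
 *	switch (trunk_request_enqueue(&treq, trunk, request, preq, rctx)) {
 *	case TRUNK_ENQUEUE_OK:
 *	case TRUNK_ENQUEUE_IN_BACKLOG:
 *		return my_module_yield(request);		// the mux/demux callbacks now drive the treq
 *
 *	default:
 *		trunk_request_free(&treq);			// caller cleans up on hard failure
 *		return my_module_fail(request);
 *	}
 */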
2658
2659/** Re-enqueue a request on the same connection
2660 *
2661 * If the treq has been sent, we assume that we're being signalled to requeue
2662 * because something outside of the trunk API has determined that a retransmission
2663 * is required. The easiest way to perform that retransmission is to clean up
2664 * any tracking information for the request, and then requeue it for transmission.
2665 *
2666 * If re-queueing fails, the request will enter the fail state. It should not be
2667 * accessed if this occurs.
2668 *
2669 * @param[in] treq to requeue (retransmit).
2670 * @return
2671 * - TRUNK_ENQUEUE_OK.
2672 * - TRUNK_ENQUEUE_DST_UNAVAILABLE - Connection cannot service requests.
2673 * - TRUNK_ENQUEUE_FAIL - Request isn't in a valid state to be reassigned.
2674 */
2676{
2677 trunk_connection_t *tconn = treq->pub.tconn; /* Existing conn */
2678
2679 if (!tconn) return TRUNK_ENQUEUE_FAIL;
2680
2681 if (!IS_PROCESSING(tconn)) {
2684 }
2685
2686 switch (treq->pub.state) {
2692 trunk_request_enter_pending(treq, tconn, false);
2693 if (treq->pub.trunk->conf.always_writable) {
2695 }
2697 break;
2698
2699 case TRUNK_REQUEST_STATE_BACKLOG: /* Do nothing.... */
2700 case TRUNK_REQUEST_STATE_PENDING: /* Do nothing.... */
2701 break;
2702
2703 default:
2705 return TRUNK_ENQUEUE_FAIL;
2706 }
2707
2708 return TRUNK_ENQUEUE_OK;
2709}
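/* Editor's sketch (not part of the original source): a retransmission timer firing
 * for a treq that has already been sent.  The entry point defined above is assumed
 * to be trunk_request_requeue():
 *
 *	if (trunk_request_requeue(treq) != TRUNK_ENQUEUE_OK) {
 *		return;		// the treq entered the fail state and must not be accessed again
 *	}
 */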
2710
2711/** Enqueue additional requests on a specific connection
2712 *
2713 * This may be used to create a series of requests on a single connection, or to generate
2714 * in-band status checks.
2715 *
2716 * @note If conf->always_writable, then the muxer will be called immediately. The caller
2717 * must be able to handle multiple calls to its muxer gracefully.
2718 *
2719 * @param[in,out] treq_out A trunk request handle. If the memory pointed to
2720 * is NULL, a new treq will be allocated.
2721 * Otherwise treq should point to memory allocated
2722 * with trunk_request_alloc.
2723 * @param[in] tconn to enqueue request on.
2724 * @param[in] request to enqueue.
2725 * @param[in] preq Protocol request to write out. Will be freed when
2726 * treq is freed. Should ideally be parented by the
2727 * treq if possible.
2728 * Use #trunk_request_alloc for pre-allocation of
2729 * the treq.
2730 * @param[in] rctx The resume context to write any result to.
2731 * @param[in] ignore_limits Ignore max_req_per_conn. Useful to force status
2732 * checks through even if the connection is at capacity.
2733 * Will also allow enqueuing on "inactive", "draining",
2734 * "draining-to-free" connections.
2735 * @return
2736 * - TRUNK_ENQUEUE_OK.
2737 * - TRUNK_ENQUEUE_NO_CAPACITY - At max_req_per_conn_limit
2738 * - TRUNK_ENQUEUE_DST_UNAVAILABLE - Connection cannot service requests.
2739 */
2741 request_t *request, void *preq, void *rctx,
2742 bool ignore_limits)
2743{
2744 trunk_request_t *treq;
2745 trunk_t *trunk = tconn->pub.trunk;
2746
2747 if (!fr_cond_assert_msg(!*treq_out || ((*treq_out)->pub.state == TRUNK_REQUEST_STATE_INIT),
2748 "%s requests must be in \"init\" state", __FUNCTION__)) return TRUNK_ENQUEUE_FAIL;
2749
2751
2752 /*
2753 * Limits check
2754 */
2755 if (!ignore_limits) {
2756 if (trunk->conf.max_req_per_conn &&
2759
2761 }
2762
2763 if (*treq_out) {
2764 treq = *treq_out;
2765 } else {
2766 MEM(*treq_out = treq = trunk_request_alloc(trunk, request));
2767 }
2768
2769 treq->pub.preq = preq;
2770 treq->pub.rctx = rctx;
2771 treq->bound_to_conn = true; /* Don't let the request be transferred */
2772
2773 if (trunk->conf.always_writable) {
2775 trunk_request_enter_pending(treq, tconn, true);
2778 } else {
2779 trunk_request_enter_pending(treq, tconn, true);
2780 }
2781
2782 return TRUNK_ENQUEUE_OK;
2783}
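/* Editor's sketch (not part of the original source): enqueueing an in-band status
 * check directly on a connection, ignoring capacity limits as described above.
 * The function name (assumed to be trunk_request_enqueue_on_conn()), the
 * my_status_check_t type, and passing NULL for the request_t are assumptions:
 *
 *	trunk_request_t		*treq = NULL;
 *	my_status_check_t	*check = talloc_zero(tconn, my_status_check_t);
 *
 *	if (trunk_request_enqueue_on_conn(&treq, tconn, NULL, check, check,
 *					  true) != TRUNK_ENQUEUE_OK) {
 *		talloc_free(check);		// hypothetical cleanup on failure
 *	}
 */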
2784
2785#ifndef NDEBUG
2786/** Used for sanity checks to ensure all log entries have been freed
2787 *
2788 */
2790{
2791 fr_dlist_remove(slog->log_head, slog);
2792
2793 return 0;
2794}
2795
2796void trunk_request_state_log_entry_add(char const *function, int line,
2798{
2799 trunk_request_state_log_t *slog = NULL;
2800
2802 slog = fr_dlist_head(&treq->log);
2803 fr_assert_msg(slog, "slog list head NULL but element counter was %u",
2804 fr_dlist_num_elements(&treq->log));
2805 (void)fr_dlist_remove(&treq->log, slog); /* Returns NULL when removing the list head */
2806 memset(slog, 0, sizeof(*slog));
2807 } else {
2808 MEM(slog = talloc_zero(treq, trunk_request_state_log_t));
2809 talloc_set_destructor(slog, _state_log_entry_free);
2810 }
2811
2812 slog->log_head = &treq->log;
2813 slog->from = treq->pub.state;
2814 slog->to = new;
2815 slog->function = function;
2816 slog->line = line;
2817 if (treq->pub.tconn) {
2818 slog->tconn = treq->pub.tconn;
2819 slog->tconn_id = treq->pub.tconn->pub.conn->id;
2820 slog->tconn_state = treq->pub.tconn->pub.state;
2821 }
2822
2823 fr_dlist_insert_tail(&treq->log, slog);
2824
2825}
2826
2827void trunk_request_state_log(fr_log_t const *log, fr_log_type_t log_type, char const *file, int line,
2828 trunk_request_t const *treq)
2829{
2830 trunk_request_state_log_t *slog = NULL;
2831
2832 int i;
2833
2834 for (slog = fr_dlist_head(&treq->log), i = 0;
2835 slog;
2836 slog = fr_dlist_next(&treq->log, slog), i++) {
2837 fr_log(log, log_type, file, line, "[%u] %s:%i - in conn %"PRIu64" in state %s - %s -> %s",
2838 i, slog->function, slog->line,
2839 slog->tconn_id,
2841 slog->tconn_state, "<INVALID>") : "none",
2842 fr_table_str_by_value(trunk_request_states, slog->from, "<INVALID>"),
2843 fr_table_str_by_value(trunk_request_states, slog->to, "<INVALID>"));
2844 }
2845}
2846#endif
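/* Editor's sketch (not part of the original source): dumping a stuck request's
 * state history while debugging.  Only available in !NDEBUG builds; default_log
 * and L_DBG are the server's standard log destination and log type:
 *
 *	trunk_request_state_log(&default_log, L_DBG, __FILE__, __LINE__, treq);
 */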
2847
2848/** Return the number of connections in the specified states
2849 *
2850 * @param[in] trunk to retrieve counts for.
2851 * @param[in] conn_state One or more #trunk_connection_state_t states or'd together.
2852 * @return The number of connections in the specified states.
2853 */
2855{
2856 uint16_t count = 0;
2857
2858 if (conn_state & TRUNK_CONN_INIT) count += fr_dlist_num_elements(&trunk->init);
2859 if (conn_state & TRUNK_CONN_CONNECTING) count += fr_dlist_num_elements(&trunk->connecting);
2860 if (conn_state & TRUNK_CONN_ACTIVE) count += fr_minmax_heap_num_elements(trunk->active);
2861 if (conn_state & TRUNK_CONN_FULL) count += fr_dlist_num_elements(&trunk->full);
2862 if (conn_state & TRUNK_CONN_INACTIVE) count += fr_dlist_num_elements(&trunk->inactive);
2864 if (conn_state & TRUNK_CONN_CLOSED) count += fr_dlist_num_elements(&trunk->closed);
2865 if (conn_state & TRUNK_CONN_DRAINING) count += fr_dlist_num_elements(&trunk->draining);
2867
2868 return count;
2869}
2870
2871/** Return the number of requests associated with a trunk connection
2872 *
2873 * @param[in] tconn to return request count for.
2874 * @param[in] req_state One or more request states or'd together.
2875 *
2876 * @return The number of requests in the specified states, associated with a tconn.
2877 */
2879{
2880 uint32_t count = 0;
2881
2883 if (req_state & TRUNK_REQUEST_STATE_PARTIAL) count += tconn->partial ? 1 : 0;
2884 if (req_state & TRUNK_REQUEST_STATE_SENT) count += fr_dlist_num_elements(&tconn->sent);
2886 if (req_state & TRUNK_REQUEST_STATE_CANCEL) count += fr_dlist_num_elements(&tconn->cancel);
2887 if (req_state & TRUNK_REQUEST_STATE_CANCEL_PARTIAL) count += tconn->cancel_partial ? 1 : 0;
2889
2890 return count;
2891}
2892
2893/** Automatically mark a connection as inactive
2894 *
2895 * @param[in] tconn to potentially mark as inactive.
2896 */
2898{
2899 trunk_t *trunk = tconn->pub.trunk;
2901
2902 if (tconn->pub.state != TRUNK_CONN_ACTIVE) return;
2903
2904 /*
2905 * Enforces max_req_per_conn
2906 */
2907 if (trunk->conf.max_req_per_conn > 0) {
2910 }
2911}
2912
2913/** Return whether a trunk connection should currently be considered full
2914 *
2915 * @param[in] tconn to check.
2916 * @return
2917 * - true if the connection is full.
2918 * - false if the connection is not full.
2919 */
2921{
2922 trunk_t *trunk = tconn->pub.trunk;
2924
2925 /*
2926 * Enforces max_req_per_conn
2927 */
2929 if ((trunk->conf.max_req_per_conn == 0) || (count < trunk->conf.max_req_per_conn)) return false;
2930
2931 return true;
2932}
2933
2934/** Automatically mark a connection as active or reconnect it
2935 *
2936 * @param[in] tconn to potentially mark as active or reconnect.
2937 */
2939{
2940 if (tconn->pub.state != TRUNK_CONN_FULL) return;
2941
2942 /*
2943 * Enforces max_req_per_conn
2944 */
2946}
2947
2948/** A connection is readable. Call the request_demux function to read pending requests
2949 *
2950 */
2952{
2953 trunk_t *trunk = tconn->pub.trunk;
2954
2955 DO_REQUEST_DEMUX(tconn);
2956}
2957
2958/** A connection is writable. Call the request_mux function to write pending requests
2959 *
2960 */
2962{
2963 trunk_t *trunk = tconn->pub.trunk;
2964
2965 /*
2966 * Call the cancel_sent function (if we have one)
2967 * to inform a backend datastore we no longer
2968 * care about the result
2969 */
2973 DO_REQUEST_CANCEL_MUX(tconn);
2974 }
2978 DO_REQUEST_MUX(tconn);
2979}
2980
2981/** Update the registrations for I/O events we're interested in
2982 *
2983 */
2985{
2986 trunk_t *trunk = tconn->pub.trunk;
2988
2989 switch (tconn->pub.state) {
2990 /*
2991 * We only register I/O events if the trunk connection is
2992 * in one of these states.
2993 *
2994 * For the other states the trunk shouldn't be processing
2995 * requests.
2996 */
2997 case TRUNK_CONN_ACTIVE:
2998 case TRUNK_CONN_FULL:
3003 /*
3004 * If the connection is always writable,
3005 * then we don't care about write events.
3006 */
3007 if (!trunk->conf.always_writable &&
3011 (trunk->funcs.request_cancel_mux ?
3015 }
3016
3019 (trunk->funcs.request_cancel_mux ?
3022 }
3023 break;
3024
3025 default:
3026 break;
3027 }
3028
3029 if (tconn->events != events) {
3030 /*
3031 * There may be a fatal error which results
3032 * in the connection being freed.
3033 *
3034 * Stop that from happening until after
3035 * we're done using it.
3036 */
3039 tconn->events = events;
3041 }
3042}
3043
3044/** Remove a trunk connection from whichever list it's currently in
3045 *
3046 * @param[in] tconn to remove.
3047 */
3049{
3050 trunk_t *trunk = tconn->pub.trunk;
3051
3052 switch (tconn->pub.state) {
3053 case TRUNK_CONN_ACTIVE:
3054 {
3055 int ret;
3056
3057 ret = fr_minmax_heap_extract(trunk->active, tconn);
3058 if (!fr_cond_assert_msg(ret == 0, "Failed extracting conn from active heap: %s", fr_strerror())) return;
3059 }
3060 return;
3061
3062 case TRUNK_CONN_INIT:
3063 fr_dlist_remove(&trunk->init, tconn);
3064 break;
3065
3067 fr_dlist_remove(&trunk->connecting, tconn);
3068 return;
3069
3070 case TRUNK_CONN_CLOSED:
3071 fr_dlist_remove(&trunk->closed, tconn);
3072 return;
3073
3074 case TRUNK_CONN_FULL:
3075 fr_dlist_remove(&trunk->full, tconn);
3076 return;
3077
3079 fr_dlist_remove(&trunk->inactive, tconn);
3080 return;
3081
3083 fr_dlist_remove(&trunk->inactive_draining, tconn);
3084 return;
3085
3087 fr_dlist_remove(&trunk->draining, tconn);
3088 return;
3089
3091 fr_dlist_remove(&trunk->draining_to_free, tconn);
3092 return;
3093
3094 case TRUNK_CONN_HALTED:
3095 return;
3096 }
3097}
3098
3099/** Transition a connection to the full state
3100 *
3101 * Called whenever a trunk connection is at the maximum number of requests.
3102 * Removes the connection from the active heap, and places it in the full list.
3103 */
3105{
3106 trunk_t *trunk = tconn->pub.trunk;
3107
3108 switch (tconn->pub.state) {
3109 case TRUNK_CONN_ACTIVE:
3111 break;
3112
3113 default:
3115 }
3116
3117 fr_dlist_insert_head(&trunk->full, tconn);
3119}
3120
3121/** Transition a connection to the inactive state
3122 *
3123 * Called whenever the API client wants to stop new requests being enqueued
3124 * on a trunk connection.
3125 */
3127{
3128 trunk_t *trunk = tconn->pub.trunk;
3129
3130 switch (tconn->pub.state) {
3131 case TRUNK_CONN_ACTIVE:
3132 case TRUNK_CONN_FULL:
3134 break;
3135
3136 default:
3138 }
3139
3140 fr_dlist_insert_head(&trunk->inactive, tconn);
3142}
3143
3144/** Transition a connection to the inactive-draining state
3145 *
3146 * Called whenever the trunk manager wants to drain an inactive connection
3147 * of its requests.
3148 */
3150{
3151 trunk_t *trunk = tconn->pub.trunk;
3152
3153 switch (tconn->pub.state) {
3157 break;
3158
3159 default:
3161 }
3162
3165
3166 /*
3167 * Immediately re-enqueue all pending
3168 * requests, so the connection is drained
3169 * quicker.
3170 */
3172}
3173
3174/** Transition a connection to the draining state
3175 *
3176 * Removes the connection from the active heap so it won't be assigned any new
3177 * requests.
3178 */
3180{
3181 trunk_t *trunk = tconn->pub.trunk;
3182
3183 switch (tconn->pub.state) {
3184 case TRUNK_CONN_ACTIVE:
3185 case TRUNK_CONN_FULL:
3189 break;
3190
3191 default:
3193 }
3194
3195 fr_dlist_insert_head(&trunk->draining, tconn);
3197
3198 /*
3199 * Immediately re-enqueue all pending
3200 * requests, so the connection is drained
3201 * quicker.
3202 */
3204}
3205
3206/** Transition a connection to the draining-to-reconnect state
3207 *
3208 * Removes the connection from the active heap so it won't be assigned any new
3209 * requests.
3210 */
3212{
3213 trunk_t *trunk = tconn->pub.trunk;
3214
3216
3217 switch (tconn->pub.state) {
3218 case TRUNK_CONN_ACTIVE:
3219 case TRUNK_CONN_FULL:
3224 break;
3225
3226 default:
3228 }
3229
3230 fr_dlist_insert_head(&trunk->draining_to_free, tconn);
3232
3233 /*
3234 * Immediately re-enqueue all pending
3235 * requests, so the connection is drained
3236 * quicker.
3237 */
3239}
3240
3241
3242/** Transition a connection back to the active state
3243 *
3244 * This should only be called on a connection which is in the full state,
3245 * inactive state, draining state or connecting state.
3246 */
3248{
3249 trunk_t *trunk = tconn->pub.trunk;
3250 int ret;
3251
3252 switch (tconn->pub.state) {
3253 case TRUNK_CONN_FULL:
3258 break;
3259
3260 case TRUNK_CONN_INIT:
3264 break;
3265
3266 default:
3268 }
3269
3270 ret = fr_minmax_heap_insert(trunk->active, tconn); /* re-insert into the active heap*/
3271 if (!fr_cond_assert_msg(ret == 0, "Failed inserting connection into active heap: %s", fr_strerror())) {
3273 return;
3274 }
3275
3277
3278 /*
3279 * Reorder the connections
3280 */
3281 CONN_REORDER(tconn);
3282
3283 /*
3284 * Rebalance requests
3285 */
3286 trunk_rebalance(trunk);
3287
3288 /*
3289 * We place requests into the backlog
3290 * because there were no connections
3291 * available to handle them.
3292 *
3293 * If a connection has become active
3294 * chances are those backlogged requests
3295 * can now be enqueued, so try and do
3296 * that now.
3297 *
3298 * If there's requests sitting in the
3299 * backlog indefinitely, it's because
3300 * they were inserted there erroneously
3301 * when there were active connections
3302 * which could have handled them.
3303 */
3304 trunk_backlog_drain(trunk);
3305}
3306
3307/** Connection transitioned to the init state
3308 *
3309 * Reflect the connection state change in the lists we use to track connections.
3310 *
3311 * @note This function is only called from the connection API as a watcher.
3312 *
3313 * @param[in] conn The connection which changes state.
3314 * @param[in] prev The state the connection was in.
3315 * @param[in] state The state the connection is now in.
3316 * @param[in] uctx The trunk_connection_t wrapping the connection.
3317 */
3321 void *uctx)
3322{
3323 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3324 trunk_t *trunk = tconn->pub.trunk;
3325
3326 switch (tconn->pub.state) {
3327 case TRUNK_CONN_HALTED:
3328 break;
3329
3330 case TRUNK_CONN_CLOSED:
3332 break;
3333
3334 default:
3336 }
3337
3338 fr_dlist_insert_head(&trunk->init, tconn);
3340}
3341
3342/** Connection transitioned to the connecting state
3343 *
3344 * Reflect the connection state change in the lists we use to track connections.
3345 *
3346 * @note This function is only called from the connection API as a watcher.
3347 *
3348 * @param[in] conn The connection which changes state.
3349 * @param[in] prev The state the connection was in.
3350 * @param[in] state The state the connection is now in.
3351 * @param[in] uctx The trunk_connection_t wrapping the connection.
3352 */
3356 void *uctx)
3357{
3358 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3359 trunk_t *trunk = tconn->pub.trunk;
3360
3361 switch (tconn->pub.state) {
3362 case TRUNK_CONN_INIT:
3363 case TRUNK_CONN_CLOSED:
3365 break;
3366
3367 default:
3369 }
3370
3371 /*
3372 * If a connection just entered the
3373 * connecting state, it should have
3374 * no requests associated with it.
3375 */
3377
3378 fr_dlist_insert_head(&trunk->connecting, tconn); /* MUST remain a head insertion for reconnect logic */
3380}
3381
3382/** Connection transitioned to the shutdown state
3383 *
3384 * If we're not already in the draining-to-free state, transition there now.
3385 *
3386 * The idea is that if something signalled the connection to shutdown, we need
3387 * to reflect that by dequeuing any pending requests, not accepting new ones,
3388 * and waiting for the existing requests to complete.
3389 *
3390 * @note This function is only called from the connection API as a watcher.
3391 *
3392 * @param[in] conn The connection which changes state.
3393 * @param[in] prev The state the connection was in.
3394 * @param[in] state The state the connection is now in.
3395 * @param[in] uctx The trunk_connection_t wrapping the connection.
3396 */
3400 void *uctx)
3401{
3402 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3403
3404 switch (tconn->pub.state) {
3405 case TRUNK_CONN_DRAINING_TO_FREE: /* Do Nothing */
3406 return;
3407
3408 case TRUNK_CONN_ACTIVE: /* Transition to draining-to-free */
3409 case TRUNK_CONN_FULL:
3413 break;
3414
3415 case TRUNK_CONN_INIT:
3417 case TRUNK_CONN_CLOSED:
3418 case TRUNK_CONN_HALTED:
3420 }
3421
3423}
3424
3425/** Trigger a reconnection of the trunk connection
3426 *
3427 * @param[in] tl timer list the timer was inserted into.
3428 * @param[in] now Current time.
3429 * @param[in] uctx The tconn.
3430 */
3432{
3433 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3434
3436}
3437
3438/** Connection transitioned to the connected state
3439 *
3440 * Reflect the connection state change in the lists we use to track connections.
3441 *
3442 * @note This function is only called from the connection API as a watcher.
3443 *
3444 * @param[in] conn The connection which changes state.
3445 * @param[in] prev The state the connection was in.
3446 * @param[in] state The state the connection is now in.
3447 * @param[in] uctx The trunk_connection_t wrapping the connection.
3448 */
3452 void *uctx)
3453{
3454 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3455 trunk_t *trunk = tconn->pub.trunk;
3456
3457 /*
3458 * If a connection was just connected, it should only
3459 * have a pending list of requests. This state is found
3460 * in the rlm_radius module, which starts a new trunk,
3461 * and then immediately enqueues a request onto it. The
3462 * alternative for rlm_radius is to keep its own queue
3463 * of pending requests before the trunk is fully
3464 * initialized, and then enqueue them onto the trunk
3465 * when the trunk is connected.
3466 *
3467 * It's instead easier (and makes more sense) to allow
3468 * the trunk to accept packets into its queue. If there
3469 * are no connections within a period of time, then the
3470 * requests will retry, or will time out.
3471 */
3473
3474 /*
3475 * Set here, as the active state can
3476 * be transitioned to from full and
3477 * draining too.
3478 */
3479 trunk->pub.last_connected = fr_time();
3480
3481 /*
3482 * Insert a timer to reconnect the
3483 * connection periodically.
3484 */
3485 if (fr_time_delta_ispos(trunk->conf.lifetime)) {
3486 if (fr_timer_in(tconn, trunk->el->tl, &tconn->lifetime_ev,
3487 trunk->conf.lifetime, false, _trunk_connection_lifetime_expire, tconn) < 0) {
3488 PERROR("Failed inserting connection reconnection timer event, halting connection");
3490 return;
3491 }
3492 }
3493
3495}
3496
3497/** Connection failed after it was connected
3498 *
3499 * Reflect the connection state change in the lists we use to track connections.
3500 *
3501 * @note This function is only called from the connection API as a watcher.
3502 *
3503 * @param[in] conn The connection which changes state.
3504 * @param[in] prev The state the connection was in.
3505 * @param[in] state The state the connection is now in.
3506 * @param[in] uctx The trunk_connection_t wrapping the connection.
3507 */
3511 void *uctx)
3512{
3513 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3514 trunk_t *trunk = tconn->pub.trunk;
3515 bool need_requeue = false;
3516
3517 switch (tconn->pub.state) {
3518 case TRUNK_CONN_ACTIVE:
3519 case TRUNK_CONN_FULL:
3524 need_requeue = true;
3526 break;
3527
3528 case TRUNK_CONN_INIT: /* Initialisation failed */
3532 break;
3533
3534 case TRUNK_CONN_CLOSED:
3535 case TRUNK_CONN_HALTED: /* Can't move backwards? */
3537 }
3538
3539 fr_dlist_insert_head(&trunk->closed, tconn); /* MUST remain a head insertion for reconnect logic */
3541
3542 /*
3543 * Now *AFTER* the connection has been
3544 * removed from the active, pool
3545 * re-enqueue the requests.
3546 */
3547 if (need_requeue) trunk_connection_requests_requeue_priv(tconn, TRUNK_REQUEST_STATE_ALL, 0, true);
3548
3549 /*
3550 * There should be no requests left on this
3551 * connection. They should have all been
3552 * moved off or failed.
3553 */
3555
3556 /*
3557 * Clear statistics and flags
3558 */
3559 tconn->sent_count = 0;
3560
3561 /*
3562 * Remove the reconnect event
3563 */
3565
3566 /*
3567 * Remove the I/O events
3568 */
3570}
3571
3572/** Connection failed
3573 *
3574 * @param[in] conn The connection which changes state.
3575 * @param[in] prev The state the connection was in.
3576 * @param[in] state The state the connection is now in.
3577 * @param[in] uctx The trunk_connection_t wrapping the connection.
3578 */
3580 connection_state_t prev,
3582 void *uctx)
3583{
3584 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3585 trunk_t *trunk = tconn->pub.trunk;
3586
3587 /*
3588 * Need to set this first as it
3589 * determines whether requests are
3590 * re-queued or fail outright.
3591 */
3592 trunk->pub.last_failed = fr_time();
3593
3594 /*
3595 * Failed in the init state, transition the
3596 * connection to closed, else we get an
3597 * INIT -> INIT transition which triggers
3598 * an assert.
3599 */
3600 if (prev == CONNECTION_STATE_INIT) _trunk_connection_on_closed(conn, prev, state, uctx);
3601
3602 /*
3603 * See what the state of the trunk is. If
3604 * there are no connections that could
3605 * potentially accept requests in the near
3606 * future, then fail all the requests in the
3607 * trunk backlog.
3608 */
3614}
3615
3616/** Connection transitioned to the halted state
3617 *
3618 * Remove the connection from all lists, as it's likely about to be freed.
3619 *
3620 * Setting the trunk back to the init state ensures that if the code is ever
3621 * refactored and #connection_signal_reconnect is used after a connection
3622 * is halted, then everything is maintained in a valid state.
3623 *
3624 * @note This function is only called from the connection API as a watcher.
3625 *
3626 * @param[in] conn The connection which changes state.
3627 * @param[in] prev The state the connection was in.
3628 * @param[in] state The state the connection is now in.
3629 * @param[in] uctx The trunk_connection_t wrapping the connection.
3630 */
3634 void *uctx)
3635{
3636 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
3637 trunk_t *trunk = tconn->pub.trunk;
3638
3639 switch (tconn->pub.state) {
3640 case TRUNK_CONN_INIT:
3641 case TRUNK_CONN_CLOSED:
3643 break;
3644
3645 default:
3647 }
3648
3649 /*
3650 * It began life in the halted state,
3651 * and will end life in the halted state.
3652 */
3654
3655 /*
3656 * There should be no requests left on this
3657 * connection. They should have all been
3658 * moved off or failed.
3659 */
3661
3662 /*
3663 * And free the connection...
3664 */
3665 if (trunk->in_handler) {
3666 /*
3667 * ...later.
3668 */
3669 fr_dlist_insert_tail(&trunk->to_free, tconn);
3670 return;
3671 }
3672 talloc_free(tconn);
3673}
3674
3675/** Free a connection
3676 *
3677 * Enforces orderly free order of children of the tconn
3678 */
3680{
3682 fr_assert(!fr_dlist_entry_in_list(&tconn->entry)); /* Should not be in a list */
3683
3684 /*
3685 * Loop over all the requests we gathered
3686 * and transition them to the failed state,
3687 * freeing them.
3688 *
3689 * Usually, requests will be re-queued when
3690 * a connection enters the closed state,
3691 * but in this case because the whole trunk
3692 * is being freed, we don't bother, and
3693 * just signal to the API client that the
3694 * requests failed.
3695 */
3696 if (tconn->pub.trunk->freeing) {
3697 fr_dlist_head_t to_fail;
3698 trunk_request_t *treq = NULL;
3699
3700 fr_dlist_talloc_init(&to_fail, trunk_request_t, entry);
3701
3702 /*
3703 * Remove requests from this connection
3704 */
3706 while ((treq = fr_dlist_next(&to_fail, treq))) {
3707 trunk_request_t *prev;
3708
3709 prev = fr_dlist_remove(&to_fail, treq);
3711 treq = prev;
3712 }
3713 }
3714
3715 /*
3716 * Ensure we're not signalled by the connection
3717 * as it processes its backlog of state changes,
3718 * as we are about to be freed.
3719 */
3727
3728 /*
3729 * This may return -1, indicating the free was deferred
3730 * this is fine. It just means the conn will be freed
3731 * after all the handlers have exited.
3732 */
3733 (void)talloc_free(tconn->pub.conn);
3734 tconn->pub.conn = NULL;
3735
3736 return 0;
3737}
3738
3739/** Attempt to spawn a new connection
3740 *
3741 * Calls the API client's alloc() callback to create a new connection_t,
3742 * then inserts the connection into the 'connecting' list.
3743 *
3744 * @param[in] trunk to spawn connection in.
3745 * @param[in] now The current time.
3746 */
3748{
3749 trunk_connection_t *tconn;
3750
3751
3752 /*
3753 * Call the API client's callback to create
3754 * a new connection_t.
3755 */
3756 MEM(tconn = talloc_zero(trunk, trunk_connection_t));
3757 tconn->pub.trunk = trunk;
3758 tconn->pub.state = TRUNK_CONN_HALTED; /* All connections start in the halted state */
3759
3760 /*
3761 * Allocate a new connection_t or fail.
3762 */
3763 DO_CONNECTION_ALLOC(tconn);
3764
3766 fr_dlist_talloc_init(&tconn->sent, trunk_request_t, entry);
3770
3771 /*
3772 * OK, we have the connection, now setup watch
3773 * points so we know when it changes state.
3774 *
3775 * This lets us automatically move the tconn
3776 * between the different lists in the trunk
3777 * with minimum extra code.
3778 */
3780 _trunk_connection_on_init, false, tconn); /* Before init() has been called */
3781
3783 _trunk_connection_on_connecting, false, tconn); /* After init() has been called */
3784
3786 _trunk_connection_on_connected, false, tconn); /* After open() has been called */
3787
3789 _trunk_connection_on_closed, false, tconn); /* Before close() has been called */
3790
3792 _trunk_connection_on_failed, false, tconn); /* Before failed() has been called */
3793
3795 _trunk_connection_on_shutdown, false, tconn); /* After shutdown() has been called */
3796
3798 _trunk_connection_on_halted, false, tconn); /* About to be freed */
3799
3800 talloc_set_destructor(tconn, _trunk_connection_free);
3801
3802 connection_signal_init(tconn->pub.conn); /* annnnd GO! */
3803
3804 trunk->pub.last_open = now;
3805
3806 return 0;
3807}
3808
3809/** Pop a cancellation request off a connection's cancellation queue
3810 *
3811 * The request we return is advanced by the request moving out of the
3812 * cancel state and into the cancel_sent or cancel_complete state.
3813 *
3814 * One of these signalling functions must be called after the request
3815 * has been popped:
3816 *
3817 * - #trunk_request_signal_cancel_sent
3818 * The remote datastore has been informed, but we need to wait for acknowledgement.
3819 * The #trunk_request_demux_t callback must handle the acks calling
3820 * #trunk_request_signal_cancel_complete when an ack is received.
3821 *
3822 * - #trunk_request_signal_cancel_complete
3823 * The request was cancelled and we don't need to wait, clean it up immediately.
3824 *
3825 * @param[out] treq_out to process
3826 * @param[in] tconn Connection to drain cancellation request from.
3827 * @return
3828 * - 1 if no more requests.
3829 * - 0 if a new request was written to treq_out.
3830 * - -1 if the connection was previously freed. Caller *MUST NOT* touch any
3831 * memory or requests associated with the connection.
3832 * - -2 if called outside of the cancel muxer.
3833 */
3835{
3836 if (unlikely(tconn->pub.state == TRUNK_CONN_HALTED)) return -1;
3837
3839 "%s can only be called from within request_cancel_mux handler",
3840 __FUNCTION__)) return -2;
3841
3842 *treq_out = tconn->cancel_partial ? tconn->cancel_partial : fr_dlist_head(&tconn->cancel);
3843 if (!*treq_out) return 1;
3844
3845 return 0;
3846}
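/* Editor's sketch (not part of the original source): the shape of a
 * request_cancel_mux handler built around the pop function above (assumed to be
 * trunk_connection_pop_cancellation()).  The my_write_cancel() helper is an
 * assumption; the signal function follows the documentation above:
 *
 *	trunk_request_t *treq;
 *
 *	while (trunk_connection_pop_cancellation(&treq, tconn) == 0) {
 *		if (my_write_cancel(conn, treq->pub.preq) < 0) break;	// try again when next writable
 *
 *		trunk_request_signal_cancel_sent(treq);	// now wait for the remote ack
 *	}
 */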
3847
3848/** Pop a request off a connection's pending queue
3849 *
3850 * The request we return is advanced by the request moving out of the partial or
3851 * pending states, when the mux function signals us.
3852 *
3853 * If the same request is returned again and again, it means the muxer isn't actually
3854 * doing anything with the request we returned, and it's an error in the muxer code.
3855 *
3856 * One of these signalling functions must be used after the request has been popped:
3857 *
3858 * - #trunk_request_signal_complete
3859 * The request was completed. Either we got a synchronous response, or we knew the
3860 * response without contacting an external server (cache).
3861 *
3862 * - #trunk_request_signal_fail
3863 * Failed muxing the request due to a permanent issue, i.e. an invalid request.
3864 *
3865 * - #trunk_request_signal_partial
3866 * Wrote part of a request. This request will be returned on the next call to this
3867 * function so that the request_mux function can finish writing it. Only useful
3868 * for stream type connections. Datagram type connections cannot have partial
3869 * writes.
3870 *
3871 * - #trunk_request_signal_sent Successfully sent a request.
3872 *
3873 * @param[out] treq_out to process
3874 * @param[in] tconn to pop a request from.
3875 * @return
3876 * - 1 if no more requests.
3877 * - 0 if a new request was written to treq_out.
3878 * - -1 if the connection was previously freed. Caller *MUST NOT* touch any
3879 * memory or requests associated with the connection.
3880 * - -2 if called outside of the muxer.
3881 */
3883{
3884 if (unlikely(tconn->pub.state == TRUNK_CONN_HALTED)) return -1;
3885
3887 "%s can only be called from within request_mux handler",
3888 __FUNCTION__)) return -2;
3889
3890 *treq_out = tconn->partial ? tconn->partial : fr_heap_peek(tconn->pending);
3891 if (!*treq_out) return 1;
3892
3893 return 0;
3894}
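/* Editor's sketch (not part of the original source): the shape of a request_mux
 * handler built around the pop function above (assumed to be
 * trunk_connection_pop_request()).  The my_encode_and_write() helper and its
 * MY_WRITE_* results are assumptions; the signal functions are those documented above:
 *
 *	trunk_request_t *treq;
 *
 *	while (trunk_connection_pop_request(&treq, tconn) == 0) {
 *		switch (my_encode_and_write(conn, treq->pub.preq)) {	// hypothetical I/O helper
 *		case MY_WRITE_PARTIAL:
 *			trunk_request_signal_partial(treq);	// stream sockets only
 *			return;					// wait until writable again
 *
 *		case MY_WRITE_ERROR:
 *			trunk_request_signal_fail(treq);	// permanent failure for this treq
 *			continue;
 *
 *		default:
 *			trunk_request_signal_sent(treq);	// the demuxer will handle the response
 *		}
 *	}
 */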
3895
3896/** Signal that a trunk connection is writable
3897 *
3898 * Should be called from the 'write' I/O handler to signal that requests can be enqueued.
3899 *
3900 * @param[in] tconn to signal.
3901 */
3903{
3904 trunk_t *trunk = tconn->pub.trunk;
3905
3906 if (!fr_cond_assert_msg(!IN_HANDLER(tconn->pub.trunk),
3907 "%s cannot be called within a handler", __FUNCTION__)) return;
3908
3909 DEBUG3("[%" PRIu64 "] Signalled writable", tconn->pub.conn->id);
3910
3912}
3913
3914/** Signal that a trunk connection is readable
3915 *
3916 * Should be called from the 'read' I/O handler to signal that requests should be dequeued.
3917 *
3918 * @param[in] tconn to signal.
3919 */
3921{
3922 trunk_t *trunk = tconn->pub.trunk;
3923
3924 if (!fr_cond_assert_msg(!IN_HANDLER(tconn->pub.trunk),
3925 "%s cannot be called within a handler", __FUNCTION__)) return;
3926
3927 DEBUG3("[%" PRIu64 "] Signalled readable", tconn->pub.conn->id);
3928
3930}
3931
3932/** Signal a trunk connection cannot accept more requests
3933 *
3934 * @param[in] tconn to signal.
3935 */
3937{
3938 /* Can be called anywhere */
3939
3940 switch (tconn->pub.state) {
3941 case TRUNK_CONN_ACTIVE:
3942 case TRUNK_CONN_FULL:
3944 break;
3945
3948 break;
3949
3950 default:
3951 return;
3952 }
3953}
3954
3955/** Signal a trunk connection is no longer full
3956 *
3957 * @param[in] tconn to signal.
3958 */
3960{
3961 switch (tconn->pub.state) {
3962 case TRUNK_CONN_FULL:
3963 trunk_connection_auto_unfull(tconn); /* Mark as active if it should be active */
3964 break;
3965
3967 /*
3968 * Do the appropriate state transition based on
3969 * how many requests the trunk connection is
3970 * currently servicing.
3971 */
3972 if (trunk_connection_is_full(tconn)) {
3974 break;
3975 }
3977 break;
3978
3979 /*
3980 * Unsetting the active flag just moves
3981 * the connection back to the normal
3982 * draining state.
3983 */
3984 case TRUNK_CONN_INACTIVE_DRAINING: /* Only an external signal can trigger this transition */
3986 break;
3987
3988 default:
3989 return;
3990 }
3991}
3992
3993/** Signal a trunk connection is no longer viable
3994 *
3995 * @param[in] tconn to signal.
3996 * @param[in] reason the connection is being reconnected.
3997 */
4002
4003/** Standard I/O read function
4004 *
4005 * Underlying FD is now readable, so call the trunk to read any pending requests
4006 * from this connection.
4007 *
4008 * @param[in] el The event list signalling.
4009 * @param[in] fd that's now readable.
4010 * @param[in] flags describing the read event.
4011 * @param[in] uctx The trunk connection handle (tconn).
4012 */
4014{
4015 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
4016
4018}
4019
4020/** Standard I/O write function
4021 *
4022 * Underlying FD is now writable, so call the trunk to write any pending requests
4023 * to this connection.
4024 *
4025 * @param[in] el The event list signalling.
4026 * @param[in] fd that's now writable.
4027 * @param[in] flags describing the write event.
4028 * @param[in] uctx The trunk connection handle (tconn).
4029 */
4031{
4032 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
4033
4035}
4036
4037
4038/** Returns true if the trunk connection is in one of the specified states
4039 *
4040 * @param[in] tconn To check state for.
4041 * @param[in] state to check
4042 * @return
4043 * - True if trunk connection is in a particular state.
4044 * - False if trunk connection is not in a particular state.
4045 */
4047{
4048 return (bool)(tconn->pub.state & state);
4049}
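/* Editor's sketch (not part of the original source): guarding connection-specific
 * work on the connection still being serviceable.  The function above is assumed
 * to be trunk_connection_in_state():
 *
 *	if (!trunk_connection_in_state(tconn, TRUNK_CONN_ACTIVE | TRUNK_CONN_FULL)) return;
 */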
4050
4051/** Close connections in a particular connection list if they have no requests associated with them
4052 *
4053 * @param[in] trunk containing connections we want to close.
4054 * @param[in] head of list of connections to examine.
4055 */
4057{
4058 trunk_connection_t *tconn = NULL;
4059
4060 while ((tconn = fr_dlist_next(head, tconn))) {
4061 trunk_connection_t *prev;
4062
4064
4065 prev = fr_dlist_prev(head, tconn);
4066
4067 DEBUG3("Closing %s connection with no requests",
4069 /*
4070 * Close the connection as gracefully
4071 * as possible by signalling it should
4072 * shutdown.
4073 *
4074 * The connection, should, if serviced
4075 * correctly by the underlying library,
4076 * automatically transition to halted after
4077 * all pending reads/writes are
4078 * complete at which point we'll be informed
4079 * and free our tconn wrapper.
4080 */
4082 tconn = prev;
4083 }
4084}
4085
4086/** Rebalance connections across active trunk members when a new connection becomes active
4087 *
4088 * We don't have any visibility into the connection prioritisation algorithm;
4089 * it's essentially a black box.
4090 *
4091 * We can however determine when the correct level of requests per connection
4092 * has been reached, by dequeuing and requeueing requests up until the point
4093 * where the connection that just had a request dequeued, receives the same
4094 * request back.
4095 *
4096 * @param[in] trunk The trunk to rebalance.
4097 */
4098static void trunk_rebalance(trunk_t *trunk)
4099{
4101
4103
4104 /*
4105 * Only rebalance if the top and bottom of
4106 * the heap are not equal.
4107 */
4108 if (trunk->funcs.connection_prioritise(fr_minmax_heap_max_peek(trunk->active), head) == 0) return;
4109
4110 DEBUG3("Rebalancing requests");
4111
4112 /*
4113 * Keep requeuing requests from the connection
4114 * at the bottom of the heap until the
4115 * connection at the top is shifted from that
4116 * position.
4117 */
4118 while ((fr_minmax_heap_min_peek(trunk->active) == head) &&
4120 TRUNK_REQUEST_STATE_PENDING, 1, false));
4121}
4122
4123/** Implements the algorithm we use to manage requests per connection levels
4124 *
4125 * This is executed periodically using a timer event, and opens/closes
4126 * connections.
4127 *
4128 * The aim is to try and keep the request per connection level in a sweet spot,
4129 * where there's enough outstanding work for the connection/pipelining to work
4130 * efficiently, but not so much so that we encounter increased latency.
4131 *
4132 * In the request enqueue and dequeue functions we record every time the
4133 * average number of requests per connection goes above the target count
4134 * and record every time the average number of requests per connection goes
4135 * below the target count.
4136 *
4137 * This may sound expensive, but in all cases we're just summing counters.
4138 * CPU time required does not increase with additional requests, only with
4139 * large numbers of connections.
4140 *
4141 * If we do encounter scaling issues, we can always maintain the counters
4142 * as aggregates as an optimisation later.
4143 *
4144 * If, when the management function runs, the trunk was most recently above
4145 * the target, we:
4146 * - Return if we've been in this state for a shorter period than 'open_delay'.
4147 * - Return if we're at max.
4148 * - Return if opening a new connection will take us below the load target.
4149 * - Return if we last opened a connection within 'open_delay'.
4150 * - Otherwise we attempt to open a new connection.
4151 *
4152 * If the trunk was most recently below the target, we:
4153 * - Return if we've been in this state for a shorter period than 'close_delay'.
4154 * - Return if we're at min.
4155 * - Return if we have no connections.
4156 * - Close a connection if min is 0, and we have no outstanding
4157 * requests. Then return.
4158 * - Return if closing a connection will take us above the load target.
4159 * - Return if we last closed a connection within 'close_delay'.
4160 * - Otherwise we move a connection to draining state.
4161 */
4162static void trunk_manage(trunk_t *trunk, fr_time_t now)
4163{
4164 trunk_connection_t *tconn = NULL;
4165 trunk_request_t *treq;
4166 uint32_t average = 0;
4167 uint32_t req_count;
4168 uint16_t conn_count;
4169 trunk_state_t new_state;
4170
4171 DEBUG4("Managing trunk");
4172
4173 /*
4174 * Cleanup requests in our request cache which
4175 * have been reapable for too long.
4176 */
4177 while ((treq = fr_dlist_tail(&trunk->free_requests)) &&
4179
4180 /*
4181 * If we have idle connections, then close them.
4182 */
4185 fr_time_t idle_cutoff = fr_time_sub(now, trunk->conf.idle_timeout);
4186
4187 for (tconn = fr_minmax_heap_iter_init(trunk->active, &iter);
4188 tconn;
4189 tconn = fr_minmax_heap_iter_next(trunk->active, &iter)) {
4190 /*
4191 * The connection has outstanding requests without replies, don't do anything.
4192 */
4193 if (fr_heap_num_elements(tconn->pending) > 0) continue;
4194
4195 /*
4196 * The connection was last active after the idle cutoff time, don't do anything.
4197 */
4198 if (fr_time_gt(tconn->pub.last_write_success, idle_cutoff)) continue;
4199
4200 /*
4201 * This connection has been inactive since before the idle timeout. Drain it,
4202 * and free it.
4203 */
4205 }
4206 }
4207
4208 /*
4209 * Free any connections which have drained
4210 * and we didn't reactivate during the last
4211 * round of management.
4212 */
4216
4217 /*
4218 * Process deferred connection freeing
4219 */
4220 if (!trunk->in_handler) {
4221 while ((tconn = fr_dlist_head(&trunk->to_free))) talloc_free(fr_dlist_remove(&trunk->to_free, tconn));
4222 }
4223
4224 /*
4225 * Update the state of the trunk
4226 */
4228 new_state = TRUNK_STATE_ACTIVE;
4229 } else {
4230 /*
4231 * INIT / CONNECTING / FULL mean connections will become active
4232 * so the trunk is PENDING
4233 */
4238 }
4239
4240 if (new_state != trunk->pub.state) TRUNK_STATE_TRANSITION(new_state);
4241
4242 /*
4243 * A trunk can be signalled to not proactively
4244 * manage connections if a destination is known
4245 * to be unreachable, and doing so would result
4246 * in spurious connections still being opened.
4247 *
4248 * We still run other connection management
4249 * functions and just short circuit the function
4250 * here.
4251 */
4252 if (!trunk->managing_connections) return;
4253
4254 /*
4255 * We're above the target requests per connection,
4256 * so spawn more connections!
4257 */
4259 /*
4260 * If a 'connecting' limit is set, check we
4261 * wouldn't have too many connections in
4262 * the connecting state.
4263 *
4264 * This is a throttle in the case of transitory
4265 * load spikes, or a backend becoming
4266 * unavailable.
4267 */
4268 if ((trunk->conf.connecting > 0) &&
4270 trunk->conf.connecting)) {
4271 DEBUG4("Not opening connection - Too many (%u) connections in the connecting state",
4272 trunk->conf.connecting);
4273 return;
4274 }
4275
4276 trunk_requests_per_connection(&conn_count, &req_count, trunk, now, true);
4277
4278 /*
4279 * Only apply hysteresis if we have at least
4280 * one available connection.
4281 */
4282 if (conn_count && fr_time_gt(fr_time_add(trunk->pub.last_above_target, trunk->conf.open_delay), now)) {
4283 DEBUG4("Not opening connection - Need to be above target for %pVs. It's been %pVs",
4286 return; /* too soon */
4287 }
4288
4289 /*
4290 * We don't consider 'draining' connections
4291 * in the max calculation, as if we do
4292 * determine that we need to spawn a new
4293 * connection, then we'd move all 'draining'
4294 * connections to active before spawning
4295 * any new connections.
4296 */
4297 if ((trunk->conf.max > 0) && (conn_count >= trunk->conf.max)) {
4298 DEBUG4("Not opening connection - Have %u connections, need %u or below",
4299 conn_count, trunk->conf.max);
4300 return;
4301 }
4302
4303 /*
4304 * We consider requests pending on all connections
4305 * and the trunk's backlog, as that's the current
4306 * total load.
4307 */
4308 if (!req_count) {
4309 DEBUG4("Not opening connection - No outstanding requests");
4310 return;
4311 }
4312
4313 /*
4314 * Do the n+1 check, i.e. whether opening one more
4315 * connection would take us below our target threshold.
4316 */
4317 if (conn_count > 0) {
4318 average = ROUND_UP_DIV(req_count, (conn_count + 1));
4319 if (average < trunk->conf.target_req_per_conn) {
4320 DEBUG4("Not opening connection - Would leave us below our target requests "
4321 "per connection (now %u, after open %u)",
4322 ROUND_UP_DIV(req_count, conn_count), average);
4323 return;
4324 }
4325 } else {
4326 (void)trunk_connection_spawn(trunk, now);
4327 return;
4328 }
4329
4330 /*
4331 * If we've got a connection in the draining list
4332 * move it back into the active list, rather
4333 * than opening a new connection.
4334 */
4335 tconn = fr_dlist_head(&trunk->draining);
4336 if (tconn) {
4337 if (trunk_connection_is_full(tconn)) {
4339 } else {
4341 }
4342 return;
4343 }
4344
4345 /*
4346 * Apply the open delay if there are no connections
4347 * that could be immediately re-activated.
4348 */
4349 if (fr_time_gt(fr_time_add(trunk->pub.last_open, trunk->conf.open_delay), now)) {
4350 DEBUG4("Not opening connection - Need to wait %pVs before opening another connection. "
4351 "It's been %pVs",
4354 return;
4355 }
4356
4357 DEBUG4("Opening connection - Above target requests per connection (now %u, target %u)",
4358 ROUND_UP_DIV(req_count, conn_count), trunk->conf.target_req_per_conn);
4359 /* last_open set by trunk_connection_spawn */
4360 (void)trunk_connection_spawn(trunk, now);
4361 }
4362
4363 /*
4364 * We're below the target requests per connection.
4365 * Free some connections...
4366 */
4367 else if (fr_time_gt(trunk->pub.last_below_target, trunk->pub.last_above_target)) {
4368 if (fr_time_gt(fr_time_add(trunk->pub.last_below_target, trunk->conf.close_delay), now)) {
4369 DEBUG4("Not closing connection - Need to be below target for %pVs. It's been %pVs",
4372 return; /* too soon */
4373 }
4374
4375 trunk_requests_per_connection(&conn_count, &req_count, trunk, now, true);
4376
4377 if (!conn_count) {
4378 DEBUG4("Not closing connection - No connections to close!");
4379 return;
4380 }
4381
4382 if ((trunk->conf.min > 0) && ((conn_count - 1) < trunk->conf.min)) {
4383 DEBUG4("Not closing connection - Have %u connections, need %u or above",
4384 conn_count, trunk->conf.min);
4385 return;
4386 }
4387
4388 if (!req_count) {
4389 DEBUG4("Closing connection - No outstanding requests");
4390 goto close;
4391 }
4392
4393 /*
4394 * The minimum number of connections must be set
4395 * to zero for this to work.
4396 * min == 0, no requests, close all the connections.
4397 * This is useful for backup databases, when
4398 * maintaining the connection would lead to lots of
4399 * log file churn.
4400 */
4401 if (conn_count == 1) {
4402 DEBUG4("Not closing connection - Would leave no connections "
4403 "and there are still %u outstanding requests", req_count);
4404 return;
4405 }
4406
4407 /*
4408 * Do the n-1 check, i.e. whether closing one
4409 * connection would take us above our target threshold.
4410 */
4411 average = ROUND_UP_DIV(req_count, (conn_count - 1));
4412 if (average > trunk->conf.target_req_per_conn) {
4413 DEBUG4("Not closing connection - Would leave us above our target requests per connection "
4414 "(now %u, after close %u)", ROUND_UP_DIV(req_count, conn_count), average);
4415 return;
4416 }
4417
4418 DEBUG4("Closing connection - Below target requests per connection (now %u, target %u)",
4419 ROUND_UP_DIV(req_count, conn_count), trunk->conf.target_req_per_conn);
4420
4421 close:
4422 if (fr_time_gt(fr_time_add(trunk->pub.last_closed, trunk->conf.close_delay), now)) {
4423 DEBUG4("Not closing connection - Need to wait %pVs before closing another connection. "
4424 "It's been %pVs",
4427 return;
4428 }
4429
4430 /*
4431 * If the last event on the trunk was a connection failure and
4432 * there is only one connection, this may well be a reconnect
4433 * attempt after a failure - and needs to persist otherwise
4434 * the last event will be a failure and no new connection will
4435 * be made, leading to no new requests being enqueued.
4436 */
4437 if (fr_time_gt(trunk->pub.last_failed, fr_time_wrap(0)) &&
4438 fr_time_lt(trunk->pub.last_connected, trunk->pub.last_failed) && (conn_count == 1)) {
4439 DEBUG4("Not closing remaining connection - last event was a failure");
4440 return;
4441 }
4442
4443 /*
4444 * Inactive connections get counted in the
4445 * set of viable connections, but are likely
4446 * to be congested or dead, so we drain
4447 * (and possibly eventually free) those first.
4448 */
4449 if ((tconn = fr_dlist_tail(&trunk->inactive))) {
4450 /*
4451 * If the connection has no requests associated
4452 * with it then immediately free.
4453 */
4455 connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
4456 } else {
4458 }
4459 /*
4460 * It is possible to have too many connecting
4461 * connections when the connections are
4462 * taking a while to open and the number
4463 * of requests decreases.
4464 */
4465 } else if ((tconn = fr_dlist_tail(&trunk->connecting))) {
4466 connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
4467
4468 /*
4469 * Finally if there are no "connecting"
4470 * connections to close, and no "inactive"
4471 * connections, start draining "active"
4472 * connections.
4473 */
4474 } else if ((tconn = fr_minmax_heap_max_peek(trunk->active))) {
4475 /*
4476 * If the connection has no requests associated
4477 * with it then immediately free.
4478 */
4480 connection_signal_halt(tconn->pub.conn); /* Also frees the tconn */
4481 } else {
4483 }
4484 }
4485
4486 trunk->pub.last_closed = now;
4487
4488
4489 return;
4490 }
4491}
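To make the n+1 / n-1 hysteresis checks in trunk_manage() above concrete, here is a standalone worked example. It is not part of trunk.c: the request, connection and target counts are invented, and ROUND_UP_DIV is re-declared locally with the ceiling-division behaviour documented for the real macro.

#include <stdio.h>

/* Local stand-in for the library's ceiling-division macro */
#define ROUND_UP_DIV(_x, _y) (((_x) + (_y) - 1) / (_y))

int main(void)
{
	unsigned int req_count = 100;	/* hypothetical requests pending across the trunk */
	unsigned int conn_count = 4;	/* hypothetical connections currently counted */
	unsigned int target = 20;	/* hypothetical target_req_per_conn */

	/* n+1 check - would opening one more connection drop the average below target? */
	unsigned int after_open = ROUND_UP_DIV(req_count, conn_count + 1);	/* ceil(100/5) = 20 */

	/* n-1 check - would closing one connection push the average above target? */
	unsigned int after_close = ROUND_UP_DIV(req_count, conn_count - 1);	/* ceil(100/3) = 34 */

	printf("now %u req/conn; after open %u (%s); after close %u (%s)\n",
	       ROUND_UP_DIV(req_count, conn_count),
	       after_open, (after_open < target) ? "don't open" : "open another",
	       after_close, (after_close > target) ? "don't close" : "close one");
	return 0;
}

With these numbers the trunk would open another connection (the average does not fall below the target even after opening) but would refuse to close one (the average would jump above the target).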
4492
4493/** Event to periodically call the connection management function
4494 *
4495 * @param[in] tl this event belongs to.
4496 * @param[in] now current time.
4497 * @param[in] uctx The trunk.
4498 */
4499static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
4500{
4501 trunk_t *trunk = talloc_get_type_abort(uctx, trunk_t);
4502
4503 trunk_manage(trunk, now);
4504
4506 if (fr_timer_in(trunk, tl, &trunk->manage_ev, trunk->conf.manage_interval,
4507 false, _trunk_timer, trunk) < 0) {
4508 PERROR("Failed inserting trunk management event");
4509 /* Not much we can do, hopefully the trunk will be freed soon */
4510 }
4511 }
4512}
4513
4514/** Return a count of requests in a specific state, on connections in a specific state
4515 *
4516 * @param[in] trunk to retrieve counts for.
4517 * @param[in] conn_state One or more connection states or'd together.
4518 * @param[in] req_state One or more request states or'd together.
4519 * @return The number of requests in a particular state, on connections in a particular state.
4520 */
4521uint64_t trunk_request_count_by_state(trunk_t *trunk, int conn_state, int req_state)
4522{
4523 uint64_t count = 0;
4524 trunk_connection_t *tconn = NULL;
4526
4527#define COUNT_BY_STATE(_state, _list) \
4528do { \
4529 if (conn_state & (_state)) { \
4530 tconn = NULL; \
4531 while ((tconn = fr_dlist_next(&trunk->_list, tconn))) { \
4532 count += trunk_request_count_by_connection(tconn, req_state); \
4533 } \
4534 } \
4535} while (0)
4536
4537 if (conn_state & TRUNK_CONN_ACTIVE) {
4538 for (tconn = fr_minmax_heap_iter_init(trunk->active, &iter);
4539 tconn;
4540 tconn = fr_minmax_heap_iter_next(trunk->active, &iter)) {
4541 count += trunk_request_count_by_connection(tconn, req_state);
4542 }
4543 }
4544
4547 COUNT_BY_STATE(TRUNK_CONN_INACTIVE_DRAINING, inactive_draining);
4550
4552
4553 return count;
4554}
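As a hedged usage sketch for callers of this counting function: the two state constants are the ones referenced elsewhere in this file, while the wrapper itself is hypothetical.

/* Hypothetical helper: how many requests are still waiting to be written
 * on connections the trunk currently considers active? */
static uint64_t my_module_pending_on_active(trunk_t *trunk)
{
	return trunk_request_count_by_state(trunk, TRUNK_CONN_ACTIVE, TRUNK_REQUEST_STATE_PENDING);
}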
4555
4556/** Update timestamps for when we last had a transition from above target to below target or vice versa
4557 *
4558 * Should be called every time a connection or request is allocated or freed.
4559 *
4560 * @param[out] conn_count_out How many connections we considered.
4561 * @param[out] req_count_out How many requests we considered.
4562 * @param[in] trunk to operate on.
4563 * @param[in] now The current time.
4564 * @param[in] verify if true (and this is a debug build), then assert if req_per_conn
4565 * has changed.
4566 * @return
4567 * - 0 if the average couldn't be calculated (no requests or no connections).
4568 * - The average number of requests per connection.
4569 */
4570static uint64_t trunk_requests_per_connection(uint16_t *conn_count_out, uint32_t *req_count_out,
4571 trunk_t *trunk, fr_time_t now,
4572 NDEBUG_UNUSED bool verify)
4573{
4574 uint32_t req_count = 0;
4575 uint16_t conn_count = 0;
4576 uint64_t req_per_conn = 0;
4577
4579
4580 /*
4581 * No need to update these as the trunk is being freed
4582 */
4583 if (trunk->freeing) goto done;
4584
4585 /*
4586 * Count all connections except draining and draining to free.
4587 *
4588 * Omitting these connection states artificially raises the
4589 * request to connection ratio, so that we can preemptively spawn
4590 * new connections.
4591 *
4592 * In the case of TRUNK_CONN_DRAINING | TRUNK_CONN_INACTIVE_DRAINING
4593 * the trunk management code has enough hysteresis to not
4594 * immediately reactivate the connection.
4595 *
4596 * In the case of TRUNK_CONN_DRAINING_TO_FREE the trunk
4597 * management code should spawn a new connection to take its place.
4598 *
4599 * Connections placed in the DRAINING_TO_FREE state are being
4600 * closed preemptively to deal with bugs on the server we're
4601 * talking to, or misconfigured firewalls which are trashing
4602 * TCP/UDP connection states.
4603 */
4608
4609 /*
4610 * Requests on all connections
4611 */
4612 req_count = trunk_request_count_by_state(trunk,
4615
4616 /*
4617 * No connections, but we do have requests
4618 */
4619 if (conn_count == 0) {
4620 if ((req_count > 0) && (trunk->conf.target_req_per_conn > 0)) goto above_target;
4621 goto done;
4622 }
4623
4624 if (req_count == 0) {
4625 if (trunk->conf.target_req_per_conn > 0) goto below_target;
4626 goto done;
4627 }
4628
4629 /*
4630 * Calculate the req_per_conn
4631 */
4632 req_per_conn = ROUND_UP_DIV(req_count, conn_count);
4633 if (req_per_conn > trunk->conf.target_req_per_conn) {
4634 above_target:
4635 /*
4636 * Edge - Below target to above target (too many requests per conn - spawn more)
4637 *
4638 * The equality check is correct here as both values start at 0.
4639 */
4641 } else if (req_per_conn < trunk->conf.target_req_per_conn) {
4642 below_target:
4643 /*
4644 * Edge - Above target to below target (too few requests per conn - close some)
4645 *
4646 * The equality check is correct here as both values start at 0.
4647 */
4649 }
4650
4651done:
4652 if (conn_count_out) *conn_count_out = conn_count;
4653 if (req_count_out) *req_count_out = req_count;
4654
4655 /*
4656 * Check we haven't missed a call to trunk_requests_per_connection
4657 */
4658 fr_assert(!verify || (trunk->last_req_per_conn == 0) || (req_per_conn == trunk->last_req_per_conn));
4659
4660 trunk->last_req_per_conn = req_per_conn;
4661
4662 return req_per_conn;
4663}
4664
4665/** Drain the backlog of as many requests as possible
4666 *
4667 * @param[in] trunk To drain backlog requests for.
4668 */
4669static void trunk_backlog_drain(trunk_t *trunk)
4670{
4671 trunk_request_t *treq;
4672
4673 if (fr_heap_num_elements(trunk->backlog) == 0) return;
4674
4675 /*
4676 * If it's always writable, this isn't
4677 * really a noteworthy event.
4678 */
4679 if (!trunk->conf.always_writable) DEBUG3("Draining backlog of requests");
4680
4681 /*
4682 * Do *NOT* add an artificial limit
4683 * here. We rely on all available
4684 * connections entering the full
4685 * state and transitioning back to
4686 * active in order to drain the
4687 * backlog.
4688 */
4689 while ((treq = fr_heap_peek(trunk->backlog))) {
4690 switch (trunk_request_enqueue_existing(treq)) {
4691 case TRUNK_ENQUEUE_OK:
4692 continue;
4693
4694 /*
4695 * Signal to stop
4696 */
4698 break;
4699
4700 /*
4701 * Failed enqueueing the request,
4702 * have it enter the failed state
4703 * which will free it and
4704 * re-enliven the yielded request.
4705 */
4707 case TRUNK_ENQUEUE_FAIL:
4709 continue;
4710
4713 return;
4714 }
4715 }
4716}
4717
4718/** Force the trunk to re-establish its connections
4719 *
4720 * @param[in] trunk to signal.
4721 * @param[in] states One or more states or'd together.
4722 * @param[in] reason Why the connections are being signalled to reconnect.
4723 */
4724void trunk_reconnect(trunk_t *trunk, int states, connection_reason_t reason)
4725{
4726
4727#define RECONNECT_BY_STATE(_state, _list) \
4728do { \
4729 if (states & (_state)) { \
4730 size_t i; \
4731 for (i = fr_dlist_num_elements(&trunk->_list); i > 0; i--) { \
4732 connection_signal_reconnect(((trunk_connection_t *)fr_dlist_tail(&trunk->_list))->pub.conn, reason); \
4733 } \
4734 } \
4735} while (0)
4736
4737 /*
4738 * Connections in the 'connecting' state
4739 * may re-enter that state, so we need to
4740 * be careful not to enter an infinite
4741 * loop, as we iterate over the list
4742 * again and again.
4743 */
4745
4746 if (states & TRUNK_CONN_ACTIVE) {
4747 trunk_connection_t *tconn;
4748 while ((tconn = fr_minmax_heap_min_peek(trunk->active))) connection_signal_reconnect(tconn->pub.conn, reason);
4749 }
4750
4758}
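A hedged usage sketch for trunk_reconnect(): only its signature and the TRUNK_CONN_ACTIVE flag are taken from this file; the helper and its reason argument are hypothetical.

/* Hypothetical helper: bounce every active connection, e.g. after the
 * destination address changes.  'reason' is whatever connection_reason_t
 * value the caller considers appropriate. */
static void my_module_bounce_active(trunk_t *trunk, connection_reason_t reason)
{
	trunk_reconnect(trunk, TRUNK_CONN_ACTIVE, reason);
}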
4759
4760/** Start the trunk running
4761 *
4762 */
4763int trunk_start(trunk_t *trunk)
4764{
4765 uint16_t i;
4766
4767 if (unlikely(trunk->started)) return 0;
4768
4769 /*
4770 * Spawn the initial set of connections
4771 */
4772 for (i = 0; i < trunk->conf.start; i++) {
4773 DEBUG("[%i] Starting initial connection", i);
4774 if (trunk_connection_spawn(trunk, fr_time()) != 0) return -1;
4775 }
4776
4777 /*
4778 * If the idle timeout is set, AND there's no management interval, OR the management interval is
4779 * less than the idle timeout, update the management interval.
4780 */
4784 trunk->conf.manage_interval = trunk->conf.idle_timeout;
4785 }
4786
4788 /*
4789 * Insert the event timer to manage
4790 * the interval between managing connections.
4791 */
4792 if (fr_timer_in(trunk, trunk->el->tl, &trunk->manage_ev, trunk->conf.manage_interval,
4793 false, _trunk_timer, trunk) < 0) {
4794 PERROR("Failed inserting trunk management event");
4795 return -1;
4796 }
4797 }
4798 trunk->started = true;
4799 trunk->managing_connections = true;
4800
4801 return 0;
4802}
4803
4804/** Allow the trunk to open and close connections in response to load
4805 *
4806 */
4808{
4809 if (!trunk->started || trunk->managing_connections) return;
4810
4811 DEBUG3("Connection management enabled");
4812 trunk->managing_connections = true;
4813}
4814
4815/** Stop the trunk from opening and closing connections in response to load
4816 *
4817 */
4819{
4820 if (!trunk->started || !trunk->managing_connections) return;
4821
4822 DEBUG3("Connection management disabled");
4823 trunk->managing_connections = false;
4824}
4825
4826/** Schedule a trunk management event for the next time the event loop is executed
4827 */
4828int trunk_connection_manage_schedule(trunk_t *trunk)
4829{
4830 if (!trunk->started || !trunk->managing_connections) return 0;
4831
4832 if (fr_timer_in(trunk, trunk->el->tl, &trunk->manage_ev, fr_time_delta_wrap(0),
4833 false, _trunk_timer, trunk) < 0) {
4834 PERROR("Failed inserting trunk management event");
4835 return -1;
4836 }
4837
4838 return 0;
4839}
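A minimal sketch of when a caller might want this immediate management pass instead of waiting for the periodic timer; the call site is hypothetical, and only the trunk_connection_manage_schedule() signature is taken from this file.

/* Hypothetical: a previously unreachable backend just recovered, so ask the
 * trunk to re-evaluate its connection count on the next event loop pass. */
static void my_module_backend_recovered(trunk_t *trunk)
{
	if (trunk_connection_manage_schedule(trunk) < 0) {
		/* Scheduling failed - the trunk code has already logged the error */
	}
}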
4840
4841/** Order connections by queue depth
4842 *
4843 */
4844static int8_t _trunk_connection_order_by_shortest_queue(void const *one, void const *two)
4845{
4848
4851
4852 /*
4853 * Add a fudge factor of 1 to reduce spurious rebalancing
4854 */
4855 return ((a_count > b_count) && ((a_count - b_count) > 1)) - ((b_count > a_count) && ((b_count - a_count) > 1));
4856}
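A standalone sanity check of the fudge-factor comparison above (not part of trunk.c; the queue depths are invented and the expression is copied out so it can be exercised with plain numbers).

#include <assert.h>
#include <stdint.h>

/* Same comparison expression as _trunk_connection_order_by_shortest_queue() */
static int8_t order_by_depth(uint32_t a_count, uint32_t b_count)
{
	return ((a_count > b_count) && ((a_count - b_count) > 1)) -
	       ((b_count > a_count) && ((b_count - a_count) > 1));
}

int main(void)
{
	assert(order_by_depth(5, 3) == 1);	/* a is deeper by more than 1 */
	assert(order_by_depth(4, 3) == 0);	/* a difference of 1 is ignored */
	assert(order_by_depth(2, 7) == -1);	/* b is deeper by more than 1 */
	return 0;
}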
4857
4858/** Free a trunk, gracefully closing all connections.
4859 *
4860 */
4861static int _trunk_free(trunk_t *trunk)
4862{
4863 trunk_connection_t *tconn;
4864 trunk_request_t *treq;
4865 trunk_watch_entry_t *watch;
4866 size_t i;
4867
4868 DEBUG4("Trunk free %p", trunk);
4869
4870 trunk->freeing = true; /* Prevent re-enqueuing */
4871
4872 /*
4873 * We really don't want this firing after
4874 * we've freed everything.
4875 */
4877
4878 /*
4879 * Now free the connections in each of the lists.
4880 *
4881 * Each time a connection is freed it removes itself from the list
4882 * it's in, which means the head should keep advancing automatically.
4883 */
4884 while ((tconn = fr_minmax_heap_min_peek(trunk->active))) connection_signal_halt(tconn->pub.conn);
4885 while ((tconn = fr_dlist_head(&trunk->init))) connection_signal_halt(tconn->pub.conn);
4886 while ((tconn = fr_dlist_head(&trunk->connecting))) connection_signal_halt(tconn->pub.conn);
4887 while ((tconn = fr_dlist_head(&trunk->full))) connection_signal_halt(tconn->pub.conn);
4888 while ((tconn = fr_dlist_head(&trunk->inactive))) connection_signal_halt(tconn->pub.conn);
4889 while ((tconn = fr_dlist_head(&trunk->inactive_draining))) connection_signal_halt(tconn->pub.conn);
4890 while ((tconn = fr_dlist_head(&trunk->closed))) connection_signal_halt(tconn->pub.conn);
4891 while ((tconn = fr_dlist_head(&trunk->draining))) connection_signal_halt(tconn->pub.conn);
4892 while ((tconn = fr_dlist_head(&trunk->draining_to_free))) connection_signal_halt(tconn->pub.conn);
4893
4894 /*
4895 * Process any deferred connection frees
4896 */
4897 while ((tconn = fr_dlist_head(&trunk->to_free))) talloc_free(fr_dlist_remove(&trunk->to_free, tconn));
4898
4899 /*
4900 * Free any requests left in the backlog
4901 */
4902 while ((treq = fr_heap_peek(trunk->backlog))) trunk_request_enter_failed(treq);
4903
4904 /*
4905 * Free any requests in our request cache
4906 */
4907 while ((treq = fr_dlist_head(&trunk->free_requests))) talloc_free(treq);
4908
4909 /*
4910 * Free any entries in the watch lists
4911 */
4912 for (i = 0; i < NUM_ELEMENTS(trunk->watch); i++) {
4913 while ((watch = fr_dlist_pop_head(&trunk->watch[i]))) talloc_free(watch);
4914 }
4915
4916 return 0;
4917}
4918
4919/** Allocate a new collection of connections
4920 *
4921 * This function should be called first to allocate a new trunk.
4922 *
4923 * After the trunk has been allocated, #trunk_request_alloc and
4924 * #trunk_request_enqueue should be used to allocate memory for trunk
4925 * requests, and pass a preq (protocol request) to the trunk for
4926 * processing.
4927 *
4928 * The trunk will then asynchronously process the request, writing the result
4929 * to a specified rctx. See #trunk_request_enqueue for more details.
4930 *
4931 * @note Trunks may not be shared between multiple threads under any circumstances.
4932 *
4933 * @param[in] ctx To use for any memory allocations. Must be thread local.
4934 * @param[in] el to use for I/O and timer events.
4935 * @param[in] funcs Callback functions.
4936 * @param[in] conf Common user configurable parameters.
4937 * @param[in] log_prefix To prepend to global messages.
4938 * @param[in] uctx User data to pass to the alloc function.
4939 * @param[in] delay_start If true, then we will not spawn any connections
4940 * until the first request is enqueued.
4941 * @return
4942 * - New trunk handle on success.
4943 * - NULL on error.
4944 */
4945trunk_t *trunk_alloc(TALLOC_CTX *ctx, fr_event_list_t *el,
4946 trunk_io_funcs_t const *funcs, trunk_conf_t const *conf,
4947 char const *log_prefix, void const *uctx, bool delay_start)
4948{
4949 trunk_t *trunk;
4950 size_t i;
4951
4952 /*
4953 * Check we have the functions we need
4954 */
4955 if (!fr_cond_assert(funcs->connection_alloc)) return NULL;
4956
4957 MEM(trunk = talloc_zero(ctx, trunk_t));
4958 trunk->el = el;
4959 trunk->log_prefix = talloc_strdup(trunk, log_prefix);
4960
4961 memcpy(&trunk->funcs, funcs, sizeof(trunk->funcs));
4962 if (!trunk->funcs.connection_prioritise) {
4964 }
4966
4967 memcpy(&trunk->conf, conf, sizeof(trunk->conf));
4968
4969 memcpy(&trunk->uctx, &uctx, sizeof(trunk->uctx));
4970 talloc_set_destructor(trunk, _trunk_free);
4971
4972 /*
4973 * Unused request list...
4974 */
4976
4977 /*
4978 * Request backlog queue
4979 */
4981 trunk_request_t, heap_id, 0));
4982
4983 /*
4984 * Connection queues and trees
4985 */
4987 trunk_connection_t, heap_id, 0));
4997
4998 /*
4999 * Watch lists
5000 */
5001 for (i = 0; i < NUM_ELEMENTS(trunk->watch); i++) {
5003 }
5004
5005 DEBUG4("Trunk allocated %p", trunk);
5006
5007 if (!delay_start) {
5008 if (trunk_start(trunk) < 0) {
5009 talloc_free(trunk);
5010 return NULL;
5011 }
5012 }
5013
5014 return trunk;
5015}
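A hedged sketch of the allocation and enqueue flow described in the comment above trunk_alloc(). The io_funcs/conf contents, the helper names and the preq/rctx values are placeholders; only the trunk_alloc() and trunk_request_enqueue() signatures are taken from this file.

/* Hypothetical per-thread module initialisation.  io_funcs must at least
 * provide .connection_alloc (see the assert in trunk_alloc() above). */
static trunk_t *my_module_trunk_init(TALLOC_CTX *ctx, fr_event_list_t *el,
				     trunk_io_funcs_t const *io_funcs, trunk_conf_t const *conf)
{
	/* delay_start = false, so the initial connections are spawned immediately */
	return trunk_alloc(ctx, el, io_funcs, conf, "my_module", NULL, false);
}

/* Hypothetical enqueue path.  preq is the module's protocol request,
 * rctx its resume context. */
static trunk_enqueue_t my_module_enqueue(trunk_t *trunk, request_t *request, void *preq, void *rctx)
{
	trunk_request_t *treq = NULL;

	return trunk_request_enqueue(&treq, trunk, request, preq, rctx);
}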
5016
5017#ifndef TALLOC_GET_TYPE_ABORT_NOOP
5018/** Verify a trunk
5019 *
5020 * A trunk has some number of connections, which each have some number of requests. The connections and
5021 * requests are in differing kinds of containers depending on their state and how they are used, and may
5022 * have fields that can only be validated by comparison with a parent. We had planned on passing a "context"
5023 * down with the ancestral values, but that breaks the foo_verify() API. Each foo_verify() will only verify the
5024 * foo's children.
5025 */
5026void trunk_verify(char const *file, int line, trunk_t *trunk)
5027{
5028 fr_fatal_assert_msg(trunk, "CONSISTENCY CHECK FAILED %s[%i]: trunk_t pointer was NULL", file, line);
5029 (void) talloc_get_type_abort(trunk, trunk_t);
5030
5031 for (size_t i = 0; i < NUM_ELEMENTS(trunk->watch); i++) {
5032 _fr_dlist_verify(file, line, &trunk->watch[i]);
5033 }
5034
5035#define IO_FUNC_VERIFY(_func) \
5036 fr_fatal_assert_msg(trunk->funcs._func, "CONSISTENCY_CHECK_FAILED %s[%i]: " #_func " was NULL", file, line)
5037
5038 /*
5039 * Only a few of the function pointers *must* be non-NULL..
5040 */
5042 IO_FUNC_VERIFY(connection_prioritise);
5044
5045#define TRUNK_TCONN_CHECKS(_tconn, _state) \
5046do { \
5047 fr_fatal_assert_msg(trunk == _tconn->pub.trunk, \
5048 "CONSISTENCY_CHECK_FAILED %s[%i]: connection-trunk mismatch", file, line); \
5049 fr_fatal_assert_msg(_state == _tconn->pub.state, \
5050 "CONSISTENCY_CHECK_FAILED %s[%i]: connection-state mismatch", file, line); \
5051} while (0)
5052
5053#define TCONN_DLIST_VERIFY(_dlist, _state) \
5054do { \
5055 _fr_dlist_verify(file, line, &(trunk->_dlist)); \
5056 fr_dlist_foreach(&(trunk->_dlist), trunk_connection_t, tconn) { \
5057 trunk_connection_verify(file, line, tconn); \
5058 TRUNK_TCONN_CHECKS(tconn, _state); \
5059 } \
5060} while (0)
5061
5062#define TCONN_MINMAX_HEAP_VERIFY(_heap, _state) \
5063do {\
5064 fr_minmax_heap_verify(file, line, trunk->_heap); \
5065 fr_minmax_heap_foreach(trunk->_heap, trunk_connection_t, tconn) { \
5066 trunk_connection_verify(file, line, tconn); \
5067 TRUNK_TCONN_CHECKS(tconn, _state); \
5068 }} \
5069} while (0)
5070
5071 fr_dlist_verify(&(trunk->free_requests));
5072 FR_HEAP_VERIFY(trunk->backlog);
5073
5080 /* TCONN_DLIST_VERIFY(failed, ???); */
5085}
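A minimal sketch of calling the consistency check from a debug-only code path; the helper is hypothetical and assumes a build in which trunk_verify() is compiled in (see the TALLOC_GET_TYPE_ABORT_NOOP guard above).

/* Hypothetical debug hook: validate the whole trunk after a suspicious
 * state transition. */
static void my_module_check_trunk(trunk_t *trunk)
{
	trunk_verify(__FILE__, __LINE__, trunk);
}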
5086
5088{
5089 fr_fatal_assert_msg(tconn, "CONSISTENCY CHECK FAILED %s[%i]: trunk_connection_t pointer was NULL", file, line);
5090 (void) talloc_get_type_abort(tconn, trunk_connection_t);
5091
5092 (void) talloc_get_type_abort(tconn->pub.trunk, trunk_t);
5093
5094 /*
5095 * shouldn't be both in heap and on list--but it doesn't look like moves
5096 * to active heap wipe the dlist pointers.
5097 */
5098
5099#define TCONN_TREQ_CHECKS(_treq, _state) \
5100do { \
5101 fr_fatal_assert_msg(tconn == _treq->pub.tconn, \
5102 "CONSISTENCY_CHECK_FAILED %s[%i}: trunk request-tconn mismatch", file, line); \
5103 fr_fatal_assert_msg(tconn->pub.trunk == _treq->pub.trunk, \
5104 "CONSISTENCY_CHECK_FAILED %s[%i}: trunk request-trunk mismatch", file, line); \
5105 fr_fatal_assert_msg(_state == _treq->pub.state, \
5106 "CONSISTENCY_CHECK_FAILED %s[%i}: trunk request-state mismatch", file, line); \
5107} while (0)
5108
5109#define TREQ_DLIST_VERIFY(_dlist, _state) \
5110do { \
5111 _fr_dlist_verify(file, line, &(tconn->_dlist)); \
5112 fr_dlist_foreach(&(tconn->_dlist), trunk_request_t, treq) { \
5113 trunk_request_verify(file, line, treq); \
5114 TCONN_TREQ_CHECKS(treq, _state); \
5115 } \
5116} while (0)
5117
5118#define TREQ_HEAP_VERIFY(_heap, _state) \
5119do { \
5120 fr_heap_iter_t _iter; \
5121 fr_heap_verify(file, line, tconn->_heap); \
5122 for (trunk_request_t *treq = fr_heap_iter_init(tconn->_heap, &_iter); \
5123 treq; \
5124 treq = fr_heap_iter_next(tconn->_heap, &_iter)) { \
5125 trunk_request_verify(file, line, treq); \
5126 TCONN_TREQ_CHECKS(treq, _state); \
5127 } \
5128} while (0)
5129
5130#define TREQ_OPTION_VERIFY(_option, _state) \
5131do { \
5132 if (tconn->_option) { \
5133 trunk_request_verify(file, line, tconn->_option); \
5134 TCONN_TREQ_CHECKS(tconn->_option, _state); \
5135 } \
5136} while (0)
5137
5138 /* verify associated requests */
5145}
5146
5147void trunk_request_verify(char const *file, int line, trunk_request_t *treq)
5148{
5149 fr_fatal_assert_msg(treq, "CONSISTENCY CHECK FAILED %s[%i]: trunk_request_t pointer was NULL", file, line);
5150 (void) talloc_get_type_abort(treq, trunk_request_t);
5151
5152#ifdef WITH_VERIFY_PTR
5153 if (treq->pub.request) request_verify(file, line, treq->pub.request);
5154#endif
5155}
5156
5157
5158bool trunk_search(trunk_t *trunk, void *ptr)
5159{
5160#define TCONN_DLIST_SEARCH(_dlist) \
5161do { \
5162 fr_dlist_foreach(&(trunk->_dlist), trunk_connection_t, tconn) { \
5163 if (ptr == tconn) { \
5164 fr_fprintf(stderr, "trunk_search: tconn %p on " #_dlist "\n", ptr); \
5165 return true; \
5166 } \
5167 if (trunk_connection_search(tconn, ptr)) { \
5168 fr_fprintf(stderr, " in tconn %p on " #_dlist "\n", tconn); \
5169 return true; \
5170 } \
5171 } \
5172} while (0)
5173
5174#define TCONN_MINMAX_HEAP_SEARCH(_heap) \
5175do { \
5176 fr_minmax_heap_foreach(trunk->_heap, trunk_connection_t, tconn) { \
5177 if (ptr == tconn) { \
5178 fr_fprintf(stderr, "trunk_search: tconn %p on " #_heap "\n", ptr); \
5179 return true; \
5180 } \
5181 if (trunk_connection_search(tconn, ptr)) { \
5182 fr_fprintf(stderr, " on tconn %p on " #_heap "\n", tconn); \
5183 return true; \
5184 } \
5185 }}\
5186} while (0)
5187
5189 TCONN_DLIST_SEARCH(connecting);
5191 TCONN_DLIST_SEARCH(full);
5192 TCONN_DLIST_SEARCH(inactive);
5193 TCONN_DLIST_SEARCH(inactive_draining);
5194 TCONN_DLIST_SEARCH(failed);
5195 TCONN_DLIST_SEARCH(closed);
5196 TCONN_DLIST_SEARCH(draining);
5197 TCONN_DLIST_SEARCH(draining_to_free);
5198 TCONN_DLIST_SEARCH(to_free);
5199
5200 return false;
5201}
5202
5204{
5205#define TREQ_DLIST_SEARCH(_dlist) \
5206do { \
5207 fr_dlist_foreach(&(tconn->_dlist), trunk_request_t, treq) { \
5208 if (ptr == treq) { \
5209 fr_fprintf(stderr, "trunk_search: treq %p on " #_dlist "\n", ptr); \
5210 return true; \
5211 } \
5212 if (trunk_request_search(treq, ptr)) { \
5213 fr_fprintf(stderr, "trunk_search: preq %p found on " #_dlist, ptr); \
5214 return true; \
5215 } \
5216 } \
5217} while (0)
5218
5219#define TREQ_HEAP_SEARCH(_heap) \
5220do { \
5221 fr_heap_iter_t _iter; \
5222 for (trunk_request_t *treq = fr_heap_iter_init(tconn->_heap, &_iter); \
5223 treq; \
5224 treq = fr_heap_iter_next(tconn->_heap, &_iter)) { \
5225 if (ptr == treq) { \
5226 fr_fprintf(stderr, "trunk_search: treq %p in " #_heap "\n", ptr); \
5227 return true; \
5228 } \
5229 if (trunk_request_search(treq, ptr)) { \
5230 fr_fprintf(stderr, "trunk_search: preq %p found in " #_heap, ptr); \
5231 return true; \
5232 } \
5233 } \
5234} while (0)
5235
5236#define TREQ_OPTION_SEARCH(_option) \
5237do { \
5238 if (tconn->_option) { \
5239 if (ptr == tconn->_option) { \
5240 fr_fprintf(stderr, "trunk_search: treq %p is " #_option "\n", ptr); \
5241 return true; \
5242 } \
5243 if (trunk_request_search(tconn->_option, ptr)) { \
5244 fr_fprintf(stderr, "trunk_search: preq %p found in " #_option, ptr); \
5245 return true; \
5246 } \
5247 } \
5248} while (0)
5249
5250 /* search associated requests */
5251 TREQ_HEAP_SEARCH(pending);
5252 TREQ_DLIST_SEARCH(sent);
5253 TREQ_DLIST_SEARCH(cancel);
5254 TREQ_DLIST_SEARCH(cancel_sent);
5255 TREQ_OPTION_SEARCH(partial);
5256 TREQ_OPTION_SEARCH(cancel_partial);
5257
5258 return false;
5259}
5260
5262{
5263 return treq->pub.preq == ptr;
5264}
5265#endif
int const char * file
Definition acutest.h:702
int const char int line
Definition acutest.h:702
void request_verify(UNUSED char const *file, UNUSED int line, UNUSED request_t *request)
static bool init
Definition fuzzer.c:41
#define L(_str)
Helper for initialising arrays of string literals.
Definition build.h:209
#define NDEBUG_UNUSED
Definition build.h:328
#define FALL_THROUGH
clang 10 doesn't recognise the FALL-THROUGH comment anymore
Definition build.h:324
#define unlikely(_x)
Definition build.h:383
#define UNUSED
Definition build.h:317
#define NUM_ELEMENTS(_t)
Definition build.h:339
#define CONF_PARSER_TERMINATOR
Definition cf_parse.h:658
#define FR_CONF_OFFSET(_name, _struct, _field)
conf_parser_t which parses a single CONF_PAIR, writing the result to a field in a struct
Definition cf_parse.h:284
#define FR_CONF_POINTER(_name, _type, _flags, _res_p)
conf_parser_t which parses a single CONF_PAIR producing a single global result
Definition cf_parse.h:339
#define FR_CONF_OFFSET_SUBSECTION(_name, _flags, _struct, _field, _subcs)
conf_parser_t which populates a sub-struct using a CONF_SECTION
Definition cf_parse.h:313
@ CONF_FLAG_SUBSECTION
Instead of putting the information into a configuration structure, the configuration file routines MA...
Definition cf_parse.h:428
Defines a CONF_PAIR to C data type mapping.
Definition cf_parse.h:595
connection_state_t
Definition connection.h:45
@ CONNECTION_STATE_FAILED
Connection has failed.
Definition connection.h:54
@ CONNECTION_STATE_HALTED
The connection is in a halted state.
Definition connection.h:46
@ CONNECTION_STATE_CLOSED
Connection has been closed.
Definition connection.h:55
@ CONNECTION_STATE_CONNECTED
File descriptor is open (ready for writing).
Definition connection.h:52
@ CONNECTION_STATE_INIT
Init state, sets up connection.
Definition connection.h:49
@ CONNECTION_STATE_CONNECTING
Waiting for connection to establish.
Definition connection.h:50
@ CONNECTION_STATE_SHUTDOWN
Connection is shutting down.
Definition connection.h:53
connection_reason_t
Definition connection.h:83
static size_t min(size_t x, size_t y)
Definition dbuff.c:66
#define fr_cond_assert(_x)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition debug.h:139
#define fr_assert_msg(_x, _msg,...)
Calls panic_action ifndef NDEBUG, else logs error and causes the server to exit immediately with code...
Definition debug.h:210
#define fr_cond_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and evaluates to value of _x.
Definition debug.h:156
#define fr_fatal_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and causes the server to exit immediately with code...
Definition debug.h:184
#define MEM(x)
Definition debug.h:36
#define DEBUG(fmt,...)
Definition dhcpclient.c:39
#define fr_dlist_init(_head, _type, _field)
Initialise the head structure of a doubly linked list.
Definition dlist.h:260
static void * fr_dlist_head(fr_dlist_head_t const *list_head)
Return the HEAD item of a list or NULL if the list is empty.
Definition dlist.h:486
static void _fr_dlist_verify(char const *file, int line, fr_dlist_head_t const *list_head)
Check all items in the list are valid.
Definition dlist.h:735
static void * fr_dlist_remove(fr_dlist_head_t *list_head, void *ptr)
Remove an item from the list.
Definition dlist.h:638
static bool fr_dlist_entry_in_list(fr_dlist_t const *entry)
Check if a list entry is part of a list.
Definition dlist.h:163
static void * fr_dlist_prev(fr_dlist_head_t const *list_head, void const *ptr)
Get the previous item in a list.
Definition dlist.h:588
static unsigned int fr_dlist_num_elements(fr_dlist_head_t const *head)
Return the number of elements in the dlist.
Definition dlist.h:939
static void * fr_dlist_pop_head(fr_dlist_head_t *list_head)
Remove the head item in a list.
Definition dlist.h:672
static void * fr_dlist_tail(fr_dlist_head_t const *list_head)
Return the TAIL item of a list or NULL if the list is empty.
Definition dlist.h:531
static int fr_dlist_insert_tail(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the tail of a list.
Definition dlist.h:378
#define fr_dlist_verify(_head)
Definition dlist.h:755
#define fr_dlist_talloc_init(_head, _type, _field)
Initialise the head structure of a doubly linked list.
Definition dlist.h:275
static int fr_dlist_insert_head(fr_dlist_head_t *list_head, void *ptr)
Insert an item into the head of a list.
Definition dlist.h:338
static void * fr_dlist_next(fr_dlist_head_t const *list_head, void const *ptr)
Get the next item in a list.
Definition dlist.h:555
Head of a doubly linked list.
Definition dlist.h:51
Entry in a doubly linked list.
Definition dlist.h:41
int fr_heap_insert(fr_heap_t **hp, void *data)
Insert a new element into the heap.
Definition heap.c:146
unsigned int fr_heap_index_t
Definition heap.h:80
static void * fr_heap_peek(fr_heap_t *h)
Return the item from the top of the heap but don't pop it.
Definition heap.h:136
#define FR_HEAP_VERIFY(_heap)
Definition heap.h:212
static unsigned int fr_heap_num_elements(fr_heap_t *h)
Return the number of elements in the heap.
Definition heap.h:179
#define fr_heap_talloc_alloc(_ctx, _cmp, _talloc_type, _field, _init)
Creates a heap that verifies elements are of a specific talloc type.
Definition heap.h:115
The main heap structure.
Definition heap.h:66
#define PERROR(_fmt,...)
Definition log.h:228
#define DEBUG3(_fmt,...)
Definition log.h:266
#define ROPTIONAL(_l_request, _l_global, _fmt,...)
Use different logging functions depending on whether request is NULL or not.
Definition log.h:528
#define RDEBUG3(fmt,...)
Definition log.h:343
#define RWARN(fmt,...)
Definition log.h:297
#define DEBUG4(_fmt,...)
Definition log.h:267
#define RATE_LIMIT_LOCAL_ROPTIONAL(_entry, _l_request, _l_global, _fmt,...)
Rate limit messages using a local limiting entry.
Definition log.h:606
Track when a log message was last repeated.
Definition log.h:547
talloc_free(reap)
Stores all information relating to an event list.
Definition event.c:377
void fr_log(fr_log_t const *log, fr_log_type_t type, char const *file, int line, char const *fmt,...)
Send a server log message to its destination.
Definition log.c:580
fr_log_type_t
Definition log.h:54
#define ROUND_UP_DIV(_x, _y)
Get the ceiling value of integer division.
Definition math.h:153
unsigned short uint16_t
unsigned int uint32_t
int fr_minmax_heap_insert(fr_minmax_heap_t *hp, void *data)
void * fr_minmax_heap_iter_next(fr_minmax_heap_t *hp, fr_minmax_heap_iter_t *iter)
Get the next entry in a minmax heap.
void * fr_minmax_heap_min_peek(fr_minmax_heap_t *hp)
void * fr_minmax_heap_max_peek(fr_minmax_heap_t *hp)
unsigned int fr_minmax_heap_num_elements(fr_minmax_heap_t *hp)
Return the number of elements in the minmax heap.
void * fr_minmax_heap_iter_init(fr_minmax_heap_t *hp, fr_minmax_heap_iter_t *iter)
Iterate over entries in a minmax heap.
int fr_minmax_heap_extract(fr_minmax_heap_t *hp, void *data)
unsigned int fr_minmax_heap_iter_t
Definition minmax_heap.h:38
#define fr_minmax_heap_talloc_alloc(_ctx, _cmp, _talloc_type, _field, _init)
Creates a minmax heap that verifies elements are of a specific talloc type.
Definition minmax_heap.h:85
int8_t fr_pointer_cmp(void const *a, void const *b)
Compares two pointers.
Definition misc.c:417
static int8_t request_prioritise(void const *one, void const *two)
Definition bio.c:1034
#define fr_assert(_expr)
Definition rad_assert.h:38
static bool done
Definition radclient.c:81
#define RDEBUG(fmt,...)
Definition radclient.h:53
#define DEBUG2(fmt,...)
Definition radclient.h:43
#define WARN(fmt,...)
Definition radclient.h:47
#define INFO(fmt,...)
Definition radict.c:54
static fr_event_list_t * events
Definition radsniff.c:59
static rs_t * conf
Definition radsniff.c:53
void connection_signal_shutdown(connection_t *conn)
Shuts down a connection gracefully.
int connection_del_watch_post(connection_t *conn, connection_state_t state, connection_watch_t watch)
Remove a watch function from a post list.
Definition connection.c:472
void connection_signal_halt(connection_t *conn)
Shuts down a connection ungracefully.
void connection_signals_resume(connection_t *conn)
Resume processing of deferred signals.
Definition connection.c:319
void connection_signal_reconnect(connection_t *conn, connection_reason_t reason)
Asynchronously signal the connection should be reconnected.
void connection_signal_init(connection_t *conn)
Asynchronously signal a halted connection to start.
connection_t * connection_alloc(TALLOC_CTX *ctx, fr_event_list_t *el, connection_funcs_t const *funcs, connection_conf_t const *conf, char const *log_prefix, void const *uctx)
Allocate a new connection.
connection_watch_entry_t * connection_add_watch_pre(connection_t *conn, connection_state_t state, connection_watch_t watch, bool oneshot, void const *uctx)
Add a callback to be executed before a state function has been called.
Definition connection.c:510
connection_watch_entry_t * connection_add_watch_post(connection_t *conn, connection_state_t state, connection_watch_t watch, bool oneshot, void const *uctx)
Add a callback to be executed after a state function has been called.
Definition connection.c:532
int connection_del_watch_pre(connection_t *conn, connection_state_t state, connection_watch_t watch)
Remove a watch function from a pre list.
Definition connection.c:455
void connection_signals_pause(connection_t *conn)
Pause processing of deferred signals.
Definition connection.c:310
static fr_time_t test_time_base
Definition slab_tests.c:42
return count
Definition module.c:155
fr_time_t test_time
Definition state_test.c:3
#define fr_time()
Allow us to arbitrarily manipulate time.
Definition state_test.c:8
@ memory_order_relaxed
Definition stdatomic.h:127
#define atomic_fetch_add_explicit(object, operand, order)
Definition stdatomic.h:302
#define ATOMIC_VAR_INIT(value)
Definition stdatomic.h:88
Definition log.h:96
#define fr_table_str_by_value(_table, _number, _def)
Convert an integer to a string.
Definition table.h:772
An element in a table indexed by bit position.
Definition table.h:83
An element in an arbitrarily ordered array of name to num mappings.
Definition table.h:57
#define talloc_get_type_abort_const
Definition talloc.h:282
#define talloc_pooled_object(_ctx, _type, _num_subobjects, _total_subobjects_size)
Definition talloc.h:180
#define fr_time_gteq(_a, _b)
Definition time.h:238
#define fr_time_delta_wrap(_time)
Definition time.h:152
#define fr_time_wrap(_time)
Definition time.h:145
#define fr_time_lteq(_a, _b)
Definition time.h:240
#define fr_time_delta_ispos(_a)
Definition time.h:290
#define fr_time_add(_a, _b)
Add a time/time delta together.
Definition time.h:196
#define fr_time_gt(_a, _b)
Definition time.h:237
#define fr_time_sub(_a, _b)
Subtract one time from another.
Definition time.h:229
#define fr_time_lt(_a, _b)
Definition time.h:239
#define fr_time_delta_gt(_a, _b)
Definition time.h:283
"server local" time.
Definition time.h:69
An event timer list.
Definition timer.c:49
A timer event.
Definition timer.c:75
#define FR_TIMER_DELETE(_ev_p)
Definition timer.h:102
#define FR_TIMER_DELETE_RETURN(_ev_p)
Definition timer.h:109
#define fr_timer_in(...)
Definition timer.h:86
#define FR_TIMER_DISARM(_ev)
Definition timer.h:90
bool trunk_search(trunk_t *trunk, void *ptr)
Definition trunk.c:5158
static atomic_uint_fast64_t request_counter
Definition trunk.c:54
static void trunk_connection_enter_active(trunk_connection_t *tconn)
Transition a connection back to the active state.
Definition trunk.c:3247
#define CONN_REORDER(_tconn)
Reorder the connections in the active heap.
Definition trunk.c:775
static size_t trunk_req_trigger_names_len
Definition trunk.c:357
int trunk_connection_pop_cancellation(trunk_request_t **treq_out, trunk_connection_t *tconn)
Pop a cancellation request off a connection's cancellation queue.
Definition trunk.c:3834
fr_dlist_head_t cancel
Requests in the cancel state.
Definition trunk.c:161
int trunk_connection_manage_schedule(trunk_t *trunk)
Schedule a trunk management event for the next time the event loop is executed.
Definition trunk.c:4828
#define REQUEST_EXTRACT_SENT(_treq)
Remove the current request from the sent list.
Definition trunk.c:745
static void _trunk_connection_on_shutdown(UNUSED connection_t *conn, UNUSED connection_state_t prev, UNUSED connection_state_t state, void *uctx)
Connection transitioned to the shutdown state.
Definition trunk.c:3397
struct trunk_watch_entry_s trunk_watch_entry_t
An entry in a trunk watch function list.
fr_dlist_head_t reapable
Idle request.
Definition trunk.c:159
fr_heap_t * pending
Requests waiting to be sent.
Definition trunk.c:153
trunk_conf_t conf
Trunk common configuration.
Definition trunk.c:206
static size_t trunk_connection_states_len
Definition trunk.c:414
#define REQUEST_EXTRACT_REAPABLE(_treq)
Remove the current request from the reapable list.
Definition trunk.c:750
trunk_connection_t * tconn
The request was associated with.
Definition trunk.c:82
void trunk_connection_callback_readable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
Standard I/O read function.
Definition trunk.c:4013
fr_rate_limit_t limit_last_failure_log
Rate limit on "Refusing to enqueue requests - No active conns".
Definition trunk.c:280
void trunk_verify(char const *file, int line, trunk_t *trunk)
Verify a trunk.
Definition trunk.c:5026
fr_timer_t * manage_ev
Periodic connection management event.
Definition trunk.c:272
#define IN_HANDLER(_trunk)
Definition trunk.c:705
static fr_table_num_ordered_t const trunk_connection_states[]
Definition trunk.c:402
void trunk_reconnect(trunk_t *trunk, int states, connection_reason_t reason)
Force the trunk to re-establish its connections.
Definition trunk.c:4724
void trunk_connection_callback_writable(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
Standard I/O write function.
Definition trunk.c:4030
void * uctx
User data to pass to the function.
Definition trunk.c:191
static void trunk_request_enter_pending(trunk_request_t *treq, trunk_connection_t *tconn, bool new)
Transition a request to the pending state, adding it to the backlog of an active connection.
Definition trunk.c:1153
static void trunk_request_remove_from_conn(trunk_request_t *treq)
Remove a request from all connection lists.
Definition trunk.c:962
fr_rate_limit_t limit_max_requests_alloc_log
Rate limit on "Refusing to alloc requests - Limit of * requests reached".
Definition trunk.c:278
trunk_request_state_t to
What state we transitioned to.
Definition trunk.c:80
static int8_t _trunk_request_prioritise(void const *a, void const *b)
Compare two protocol requests.
Definition trunk.c:938
static void trunk_manage(trunk_t *trunk, fr_time_t now)
Implements the algorithm we use to manage requests per connection levels.
Definition trunk.c:4162
static int _trunk_connection_free(trunk_connection_t *tconn)
Free a connection.
Definition trunk.c:3679
trunk_io_funcs_t funcs
I/O functions.
Definition trunk.c:258
fr_dlist_head_t draining
Connections that will be freed once all their requests are complete, but can be reactivated.
Definition trunk.c:243
#define REQUEST_EXTRACT_CANCEL_PARTIAL(_treq)
Remove the current request from the cancel_partial slot.
Definition trunk.c:760
int trunk_start(trunk_t *trunk)
Start the trunk running.
Definition trunk.c:4763
void trunk_request_signal_partial(trunk_request_t *treq)
Signal a partial write.
Definition trunk.c:2028
void trunk_request_signal_fail(trunk_request_t *treq)
Signal that a trunk request failed.
Definition trunk.c:2131
#define TREQ_OPTION_SEARCH(_option)
void trunk_request_signal_cancel_sent(trunk_request_t *treq)
Signal that a remote server has been notified of the cancellation.
Definition trunk.c:2259
static void trunk_connection_enter_draining_to_free(trunk_connection_t *tconn)
Transition a connection to the draining-to-reconnect state.
Definition trunk.c:3211
trunk_watch_t func
Function to call when a trunk enters.
Definition trunk.c:187
void trunk_connection_signal_readable(trunk_connection_t *tconn)
Signal that a trunk connection is readable.
Definition trunk.c:3920
#define DO_REQUEST_FREE(_treq)
Call the free callback (if set)
Definition trunk.c:597
trunk_request_t * trunk_request_alloc(trunk_t *trunk, request_t *request)
(Pre-)Allocate a new trunk request
Definition trunk.c:2473
static void _trunk_connection_on_halted(UNUSED connection_t *conn, UNUSED connection_state_t prev, UNUSED connection_state_t state, void *uctx)
Connection transitioned to the halted state.
Definition trunk.c:3631
#define REQUEST_EXTRACT_BACKLOG(_treq)
Remove the current request from the backlog.
Definition trunk.c:716
fr_heap_index_t heap_id
Used to track the connection in the connected heap.
Definition trunk.c:138
fr_dlist_head_t closed
Connections that have closed.
Definition trunk.c:240
fr_dlist_head_t watch[TRUNK_STATE_MAX]
To be called when trunk changes state.
Definition trunk.c:264
static void trunk_watch_call(trunk_t *trunk, fr_dlist_head_t *list, trunk_state_t state)
Call a list of watch functions associated with a state.
Definition trunk.c:788
static void trunk_request_enter_cancel_complete(trunk_request_t *treq)
Cancellation was acked, the request is complete, free it.
Definition trunk.c:1503
int line
Line change occurred on.
Definition trunk.c:92
static void trunk_connection_enter_inactive_draining(trunk_connection_t *tconn)
Transition a connection to the inactive-draining state.
Definition trunk.c:3149
#define CONN_STATE_TRANSITION(_new, _log)
Definition trunk.c:440
static uint64_t trunk_requests_per_connection(uint16_t *conn_count_out, uint32_t *req_conn_out, trunk_t *trunk, fr_time_t now, NDEBUG_UNUSED bool verify)
Update timestamps for when we last had a transition from above target to below target or vice versa.
Definition trunk.c:4570
static size_t trunk_connection_events_len
Definition trunk.c:430
static void _trunk_connection_on_failed(connection_t *conn, connection_state_t prev, connection_state_t state, void *uctx)
Connection failed.
Definition trunk.c:3579
bool oneshot
Remove the function after it's called once.
Definition trunk.c:189
bool started
Has the trunk been started.
Definition trunk.c:289
static size_t trunk_states_len
Definition trunk.c:400
#define TCONN_DLIST_VERIFY(_dlist, _state)
#define IO_FUNC_VERIFY(_func)
uint32_t trunk_request_count_by_connection(trunk_connection_t const *tconn, int req_state)
Return the number of requests associated with a trunk connection.
Definition trunk.c:2878
uint64_t last_req_per_conn
The last request to connection ratio we calculated.
Definition trunk.c:294
#define DO_REQUEST_COMPLETE(_treq)
Call the complete callback (if set)
Definition trunk.c:558
static void trunk_connection_auto_full(trunk_connection_t *tconn)
Automatically mark a connection as inactive.
Definition trunk.c:2897
static void trunk_connection_remove(trunk_connection_t *tconn)
Remove a trunk connection from whichever list it's currently in.
Definition trunk.c:3048
#define TRUNK_REQUEST_STATE_LOG_MAX
The maximum number of state logs to record per request.
Definition trunk.c:71
static void trunk_connection_writable(trunk_connection_t *tconn)
A connection is writable.
Definition trunk.c:2961
#define OVER_MAX_CHECK
trunk_connection_event_t events
The current events we expect to be notified on.
Definition trunk.c:147
trunk_watch_entry_t * trunk_add_watch(trunk_t *trunk, trunk_state_t state, trunk_watch_t watch, bool oneshot, void const *uctx)
Add a watch entry to the trunk state list.
Definition trunk.c:864
static int _trunk_free(trunk_t *trunk)
Free a trunk, gracefully closing all connections.
Definition trunk.c:4861
fr_dlist_head_t failed
Connections that'll be reconnected shortly.
Definition trunk.c:238
static void trunk_rebalance(trunk_t *trunk)
Rebalance connections across active trunk members when a new connection becomes active.
Definition trunk.c:4098
static void trunk_backlog_drain(trunk_t *trunk)
Drain the backlog of as many requests as possible.
Definition trunk.c:4669
#define DO_REQUEST_CANCEL(_treq, _reason)
Call the cancel callback if set.
Definition trunk.c:519
static int8_t _trunk_connection_order_by_shortest_queue(void const *one, void const *two)
Order connections by queue depth.
Definition trunk.c:4844
struct trunk_request_pub_s pub
Public fields in the trunk request.
Definition trunk.c:100
trunk_t * trunk_alloc(TALLOC_CTX *ctx, fr_event_list_t *el, trunk_io_funcs_t const *funcs, trunk_conf_t const *conf, char const *log_prefix, void const *uctx, bool delay_start)
Allocate a new collection of connections.
Definition trunk.c:4945
#define TCONN_MINMAX_HEAP_VERIFY(_heap, _state)
trunk_request_t * cancel_partial
Partially written cancellation request.
Definition trunk.c:163
#define TCONN_MINMAX_HEAP_SEARCH(_heap)
uint64_t trunk_connection_requests_requeue(trunk_connection_t *tconn, int states, uint64_t max, bool fail_bound)
Move requests off of a connection and requeue elsewhere.
Definition trunk.c:2009
bool enabled
Whether the watch entry is enabled.
Definition trunk.c:190
fr_time_t last_freed
Last time this request was freed.
Definition trunk.c:113
#define DO_REQUEST_CONN_RELEASE(_treq)
Call the "conn_release" callback (if set)
Definition trunk.c:540
#define TREQ_DLIST_SEARCH(_dlist)
#define REQUEST_EXTRACT_CANCEL(_treq)
Remove the current request from the cancel list.
Definition trunk.c:755
static bool trunk_connection_is_full(trunk_connection_t *tconn)
Return whether a trunk connection should currently be considered full.
Definition trunk.c:2920
struct trunk_pub_s pub
Public fields in the trunk connection.
Definition trunk.c:198
trunk_cancel_reason_t cancel_reason
Why this request was cancelled.
Definition trunk.c:111
#define REQUEST_BAD_STATE_TRANSITION(_new)
Definition trunk.c:485
trunk_enqueue_t trunk_request_enqueue_on_conn(trunk_request_t **treq_out, trunk_connection_t *tconn, request_t *request, void *preq, void *rctx, bool ignore_limits)
Enqueue additional requests on a specific connection.
Definition trunk.c:2740
static void _trunk_connection_on_closed(UNUSED connection_t *conn, UNUSED connection_state_t prev, UNUSED connection_state_t state, void *uctx)
Connection failed after it was connected.
Definition trunk.c:3508
static fr_table_num_ordered_t const trunk_connection_events[]
Definition trunk.c:424
trunk_enqueue_t trunk_request_enqueue(trunk_request_t **treq_out, trunk_t *trunk, request_t *request, void *preq, void *rctx)
Enqueue a request that needs data written to the trunk.
Definition trunk.c:2586
#define TCONN_DLIST_SEARCH(_dlist)
static void trunk_request_enter_unassigned(trunk_request_t *treq)
Transition a request to the unassigned state, in preparation for re-assignment.
Definition trunk.c:1056
struct trunk_request_s trunk_request_t
Definition trunk.c:33
void * in_handler
Which handler we're inside.
Definition trunk.c:260
bool freeing
Trunk is being freed, don't spawn new connections or re-enqueue.
Definition trunk.c:286
static fr_table_num_ordered_t const trunk_states[]
Definition trunk.c:395
static void trunk_connection_readable(trunk_connection_t *tconn)
A connection is readable.
Definition trunk.c:2951
#define IS_SERVICEABLE(_tconn)
Definition trunk.c:710
trunk_enqueue_t trunk_request_requeue(trunk_request_t *treq)
Re-enqueue a request on the same connection.
Definition trunk.c:2675
#define IS_PROCESSING(_tconn)
Definition trunk.c:711
#define RECONNECT_BY_STATE(_state, _list)
static void trunk_connection_enter_draining(trunk_connection_t *tconn)
Transition a connection to the draining state.
Definition trunk.c:3179
static fr_table_num_indexed_bit_pos_t const trunk_req_trigger_names[]
Map request states to trigger names.
Definition trunk.c:342
fr_dlist_t entry
Used to track the trunk request in the conn->sent or trunk->backlog request.
Definition trunk.c:108
static void trunk_connection_close_if_empty(trunk_t *trunk, fr_dlist_head_t *head)
Close connections in a particular connection list if they have no requests associated with them.
Definition trunk.c:4056
void trunk_request_signal_cancel_complete(trunk_request_t *treq)
Signal that a remote server acked our cancellation.
Definition trunk.c:2283
static trunk_enqueue_t trunk_request_check_enqueue(trunk_connection_t **tconn_out, trunk_t *trunk, request_t *request)
Check to see if a trunk request can be enqueued.
Definition trunk.c:1601
#define DO_REQUEST_MUX(_tconn)
Write one or more requests to a connection.
Definition trunk.c:615
#define REQUEST_EXTRACT_PARTIAL(_treq)
Remove the current request from the partial slot.
Definition trunk.c:736
fr_dlist_head_t sent
Sent requests.
Definition trunk.c:157
static void trunk_request_enter_partial(trunk_request_t *treq)
Transition a request to the partial state, indicating that it has been partially sent.
Definition trunk.c:1224
fr_timer_t * lifetime_ev
Maximum time this connection can be open.
Definition trunk.c:178
int trunk_connection_pop_request(trunk_request_t **treq_out, trunk_connection_t *tconn)
Pop a request off a connection's pending queue.
Definition trunk.c:3882
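A sketch of how an API client's request muxer might drain a connection's pending queue with trunk_connection_pop_request(); the write step is elided, and the return-value conventions shown in the comments (negative on error, *treq_out set to NULL when the queue is empty) are assumptions for illustration:
trunk_request_t *treq;

for (;;) {
	if (trunk_connection_pop_request(&treq, tconn) < 0) break;	/* assumed: error */
	if (!treq) break;						/* assumed: pending queue is empty */

	/* ...encode and write treq->pub.preq to the connection handle here... */

	trunk_request_signal_sent(treq);	/* request is now awaiting a response */
}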
fr_dlist_head_t connecting
Connections which are not yet in the open state.
Definition trunk.c:224
#define TRUNK_STATE_TRANSITION(_new)
Definition trunk.c:884
void trunk_request_signal_cancel(trunk_request_t *treq)
Cancel a trunk request.
Definition trunk.c:2151
void trunk_request_state_log_entry_add(char const *function, int line, trunk_request_t *treq, trunk_request_state_t new)
Definition trunk.c:2796
static int trunk_connection_spawn(trunk_t *trunk, fr_time_t now)
Attempt to spawn a new connection.
Definition trunk.c:3747
int trunk_del_watch(trunk_t *trunk, trunk_state_t state, trunk_watch_t watch)
Remove a watch function from a trunk state list.
Definition trunk.c:830
static void _trunk_timer(fr_timer_list_t *tl, fr_time_t now, void *uctx)
Event to periodically call the connection management function.
Definition trunk.c:4499
struct trunk_connection_pub_s pub
Public fields in the trunk connection.
Definition trunk.c:134
static void trunk_request_enter_reapable(trunk_request_t *treq)
Transition a request to the reapable state, indicating that it's been sent in its entirety, but that no response is expected.
Definition trunk.c:1310
uint16_t trunk_connection_count_by_state(trunk_t *trunk, int conn_state)
Return the number of connections in the specified states.
Definition trunk.c:2854
#define IN_REQUEST_DEMUX(_trunk)
Definition trunk.c:707
#define DO_REQUEST_FAIL(_treq, _prev_state)
Call the fail callback (if set)
Definition trunk.c:577
static void trunk_request_enter_cancel(trunk_request_t *treq, trunk_cancel_reason_t reason)
Transition a request to the cancel state, placing it in a connection's cancellation list.
Definition trunk.c:1372
static trunk_enqueue_t trunk_request_enqueue_existing(trunk_request_t *treq)
Enqueue a request which has never been assigned to a connection or was previously cancelled.
Definition trunk.c:1679
bool managing_connections
Whether the trunk is allowed to manage (open/close) connections.
Definition trunk.c:291
#define DO_CONNECTION_ALLOC(_tconn)
Allocate a new connection.
Definition trunk.c:666
char const * function
State change occurred in.
Definition trunk.c:91
static size_t trunk_request_states_len
Definition trunk.c:375
fr_dlist_head_t init
Connections which have not yet started connecting.
Definition trunk.c:221
fr_dlist_head_t * log_head
To allow the log entry to remove itself on free.
Definition trunk.c:77
static void trunk_request_enter_cancel_partial(trunk_request_t *treq)
Transition a request to the cancel_partial state, placing it in a connection's cancel_partial slot.
Definition trunk.c:1423
static void _trunk_connection_on_connected(UNUSED connection_t *conn, UNUSED connection_state_t prev, UNUSED connection_state_t state, void *uctx)
Connection transitioned to the connected state.
Definition trunk.c:3449
fr_dlist_head_t to_free
Connections we're done with and will free on the next call to trunk_manage.
Definition trunk.c:249
trunk_request_t * partial
Partially written request.
Definition trunk.c:155
static void trunk_request_enter_failed(trunk_request_t *treq)
Request failed, inform the API client and free the request.
Definition trunk.c:1565
fr_minmax_heap_t * active
Connections which can service requests.
Definition trunk.c:226
conf_parser_t const trunk_config[]
Config parser definitions to populate a trunk_conf_t.
Definition trunk.c:314
static void trunk_request_enter_complete(trunk_request_t *treq)
Request completed successfully, inform the API client and free the request.
Definition trunk.c:1534
static void trunk_request_enter_sent(trunk_request_t *treq)
Transition a request to the sent state, indicating that it's been sent in its entirety.
Definition trunk.c:1254
#define DO_REQUEST_CANCEL_MUX(_tconn)
Write one or more cancellation requests to a connection.
Definition trunk.c:648
static void trunk_connection_enter_full(trunk_connection_t *tconn)
Transition a connection to the full state.
Definition trunk.c:3104
void trunk_request_free(trunk_request_t **treq_to_free)
If the trunk request is freed then update the target requests.
Definition trunk.c:2321
#define DO_REQUEST_DEMUX(_tconn)
Read one or more requests from a connection.
Definition trunk.c:632
static uint64_t trunk_connection_requests_dequeue(fr_dlist_head_t *out, trunk_connection_t *tconn, int states, uint64_t max)
Shift requests in the specified states onto new connections.
Definition trunk.c:1736
static int _trunk_request_free(trunk_request_t *treq)
Actually free the trunk request.
Definition trunk.c:2440
char const * log_prefix
What to prepend to messages.
Definition trunk.c:202
#define REQUEST_EXTRACT_PENDING(_treq)
Remove the current request from the pending list.
Definition trunk.c:726
static void _trunk_connection_lifetime_expire(UNUSED fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx)
Trigger a reconnection of the trunk connection.
Definition trunk.c:3431
static void trunk_connection_event_update(trunk_connection_t *tconn)
Update the registrations for I/O events we're interested in.
Definition trunk.c:2984
static conf_parser_t const trunk_config_request[]
Definition trunk.c:298
fr_dlist_head_t full
Connections which have too many outstanding requests.
Definition trunk.c:228
#define DEQUEUE_ALL(_src_list, _state)
static void trunk_request_enter_backlog(trunk_request_t *treq, bool new)
Transition a request to the backlog state, adding it to the backlog of the trunk.
Definition trunk.c:1091
static fr_table_num_ordered_t const trunk_request_states[]
Definition trunk.c:360
static void _trunk_connection_on_connecting(UNUSED connection_t *conn, UNUSED connection_state_t prev, UNUSED connection_state_t state, void *uctx)
Connection transitioned to the connecting state.
Definition trunk.c:3353
static fr_table_num_indexed_bit_pos_t const trunk_conn_trigger_names[]
Map connection states to trigger names.
Definition trunk.c:381
fr_dlist_head_t draining_to_free
Connections that will be freed once all their requests are complete.
Definition trunk.c:246
uint64_t id
Trunk request ID.
Definition trunk.c:104
uint64_t sent_count
The number of requests that have been sent using this connection.
Definition trunk.c:171
static void _trunk_connection_on_init(UNUSED connection_t *conn, UNUSED connection_state_t prev, UNUSED connection_state_t state, void *uctx)
Connection transitioned to the init state.
Definition trunk.c:3318
#define DO_CONNECTION_NOTIFY(_tconn, _events)
Change what events the connection should be notified about.
Definition trunk.c:688
#define TREQ_DLIST_VERIFY(_dlist, _state)
fr_dlist_head_t inactive
Connections which have been signalled to be inactive by the API client.
Definition trunk.c:231
void trunk_connection_manage_stop(trunk_t *trunk)
Stop the trunk from opening and closing connections in response to load.
Definition trunk.c:4818
#define TREQ_HEAP_VERIFY(_heap, _state)
void trunk_connection_signal_active(trunk_connection_t *tconn)
Signal a trunk connection is no longer full.
Definition trunk.c:3959
fr_dlist_head_t log
State change log.
Definition trunk.c:123
uint64_t tconn_id
If the treq was associated with a connection the connection ID.
Definition trunk.c:85
fr_dlist_t entry
Used to track the connection in the connecting, full and failed lists.
Definition trunk.c:141
static void trunk_request_enter_cancel_sent(trunk_request_t *treq)
Transition a request to the cancel_sent state, placing it in a connection's cancel_sent list.
Definition trunk.c:1458
static void trunk_connection_enter_inactive(trunk_connection_t *tconn)
Transition a connection to the inactive state.
Definition trunk.c:3126
trunk_request_state_t from
What state we transitioned from.
Definition trunk.c:79
fr_dlist_head_t cancel_sent
Sent cancellation requests.
Definition trunk.c:165
void trunk_connection_manage_start(trunk_t *trunk)
Allow the trunk to open and close connections in response to load.
Definition trunk.c:4807
fr_dlist_head_t inactive_draining
Connections which have been signalled to be inactive by the API client, and which the trunk manager is draining.
Definition trunk.c:234
void trunk_connection_signal_inactive(trunk_connection_t *tconn)
Signal a trunk connection cannot accept more requests.
Definition trunk.c:3936
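A sketch pairing trunk_connection_signal_inactive() with trunk_connection_signal_active(); handle_window_full is a hypothetical condition the API client would derive from its own protocol handle:
if (handle_window_full) {
	/* Handle can't take more outstanding requests; trunk stops assigning to it */
	trunk_connection_signal_inactive(tconn);
}

/* ...later, once responses have drained the handle's window... */
trunk_connection_signal_active(tconn);	/* connection may service requests again */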
static int _state_log_entry_free(trunk_request_state_log_t *slog)
Used for sanity checks to ensure all log entries have been freed.
Definition trunk.c:2789
void trunk_connection_verify(char const *file, int line, trunk_connection_t *tconn)
Definition trunk.c:5087
fr_heap_t * backlog
The request backlog.
Definition trunk.c:211
#define IN_REQUEST_CANCEL_MUX(_trunk)
Definition trunk.c:708
void trunk_request_verify(char const *file, int line, trunk_request_t *treq)
Definition trunk.c:5147
uint64_t trunk_request_count_by_state(trunk_t *trunk, int conn_state, int req_state)
Return a count of requests on a connection in a specific state.
Definition trunk.c:4521
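Together with trunk_connection_count_by_state() earlier in this index, the counting functions take state masks; a small sketch, assuming the TRUNK_CONN_* and TRUNK_REQUEST_STATE_* values combine as bit flags, as the *_ALL macros suggest:
uint16_t active  = trunk_connection_count_by_state(trunk, TRUNK_CONN_ACTIVE);
uint64_t pending = trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_PENDING);
uint64_t total   = trunk_request_count_by_state(trunk, TRUNK_CONN_ALL, TRUNK_REQUEST_STATE_ALL);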
void trunk_request_signal_cancel_partial(trunk_request_t *treq)
Signal a partial cancel write.
Definition trunk.c:2235
void trunk_request_signal_sent(trunk_request_t *treq)
Signal that the request was written to a connection successfully.
Definition trunk.c:2049
#define COUNT_BY_STATE(_state, _list)
void * uctx
Uctx data to pass to alloc.
Definition trunk.c:262
#define TREQ_OPTION_VERIFY(_option, _state)
bool trunk_connection_search(trunk_connection_t *tconn, void *ptr)
Definition trunk.c:5203
#define CONN_BAD_STATE_TRANSITION(_new)
Definition trunk.c:451
fr_heap_index_t heap_id
Used to track the request conn->pending heap.
Definition trunk.c:106
#define REQUEST_STATE_TRANSITION(_new)
Record a request state transition and log appropriate output.
Definition trunk.c:474
trunk_watch_entry_t * next_watcher
Watcher about to be run. Used to prevent nested watchers.
Definition trunk.c:266
static uint64_t trunk_connection_requests_requeue_priv(trunk_connection_t *tconn, int states, uint64_t max, bool fail_bound)
Remove requests in specified states from a connection, attempting to distribute them to new connectio...
Definition trunk.c:1847
bool sent
Trunk request has been sent at least once.
Definition trunk.c:118
void trunk_request_signal_complete(trunk_request_t *treq)
Signal that a trunk request is complete.
Definition trunk.c:2093
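A demux-side sketch: once a response has been decoded and matched back to its originating trunk request (my_match_response() is a hypothetical lookup, not part of this API), the API client hands control back to the trunk:
trunk_request_t *treq = my_match_response(tconn, packet_id);	/* hypothetical lookup */
if (!treq) return;	/* unsolicited or stale response */

trunk_request_signal_complete(treq);	/* trunk runs the completion callback, then releases the treq */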
static void trunk_connection_auto_unfull(trunk_connection_t *tconn)
Automatically mark a connection as active or reconnect it.
Definition trunk.c:2938
void trunk_connection_signal_reconnect(trunk_connection_t *tconn, connection_reason_t reason)
Signal a trunk connection is no longer viable.
Definition trunk.c:3998
void trunk_connection_signal_writable(trunk_connection_t *tconn)
Signal that a trunk connection is writable.
Definition trunk.c:3902
bool trunk_request_search(trunk_request_t *treq, void *ptr)
Definition trunk.c:5261
fr_dlist_t entry
List entry.
Definition trunk.c:186
static conf_parser_t const trunk_config_connection[]
Definition trunk.c:306
trunk_connection_state_t tconn_state
If the treq was associated with a connection the connection state at the time of the state transition...
Definition trunk.c:87
bool bound_to_conn
Fail the request if there's an attempt to re-enqueue it.
Definition trunk.c:115
static size_t trunk_cancellation_reasons_len
Definition trunk.c:422
static fr_table_num_ordered_t const trunk_cancellation_reasons[]
Definition trunk.c:416
static size_t trunk_conn_trigger_names_len
Definition trunk.c:393
fr_event_list_t * el
Event list used by this trunk and the connection.
Definition trunk.c:204
void trunk_request_state_log(fr_log_t const *log, fr_log_type_t log_type, char const *file, int line, trunk_request_t const *treq)
Definition trunk.c:2827
#define IN_REQUEST_MUX(_trunk)
Definition trunk.c:706
fr_dlist_head_t free_requests
Requests in the unassigned state.
Definition trunk.c:208
bool trunk_connection_in_state(trunk_connection_t *tconn, int state)
Returns true if the trunk connection is in one of the specified states.
Definition trunk.c:4046
#define TREQ_HEAP_SEARCH(_heap)
#define REQUEST_EXTRACT_CANCEL_SENT(_treq)
Remove the current request from the cancel sent list.
Definition trunk.c:769
fr_dlist_t entry
Entry in the linked list.
Definition trunk.c:78
void trunk_request_signal_reapable(trunk_request_t *treq)
Signal that the request was written to a connection successfully, but no response is expected.
Definition trunk.c:2071
Associates request queues with a connection.
Definition trunk.c:133
Wraps a normal request.
Definition trunk.c:99
Trace state machine changes for a particular request.
Definition trunk.c:76
Main trunk management handle.
Definition trunk.c:197
An entry in a trunk watch function list.
Definition trunk.c:185
uint16_t max
Maximum number of connections in the trunk.
Definition trunk.h:231
uint32_t max_req_per_conn
Maximum requests per connection.
Definition trunk.h:240
fr_time_t _CONST last_write_success
Last time we wrote to the connection.
Definition trunk.h:314
trunk_t *_CONST trunk
Trunk this request belongs to.
Definition trunk.h:347
bool backlog_on_failed_conn
Assign requests to the backlog when there are no available connections and the last connection event was a failure.
Definition trunk.h:281
uint16_t min
The number of connections the trunk should not drop below.
Definition trunk.h:229
#define TRUNK_REQUEST_STATE_ALL
All request states.
Definition trunk.h:195
void *_CONST rctx
Resume ctx of the module.
Definition trunk.h:353
trunk_t *_CONST trunk
Trunk this connection belongs to.
Definition trunk.h:375
fr_heap_cmp_t connection_prioritise
Ordering function for connections.
Definition trunk.h:737
trunk_connection_state_t
Used for sanity checks and to track which list the connection is in.
Definition trunk.h:87
@ TRUNK_CONN_FULL
Connection is full and can't accept any more requests.
Definition trunk.h:95
@ TRUNK_CONN_CONNECTING
Connection is connecting.
Definition trunk.h:90
@ TRUNK_CONN_DRAINING
Connection will be closed once it has no more outstanding requests, if it's not reactivated.
Definition trunk.h:101
@ TRUNK_CONN_INACTIVE_DRAINING
Connection is inactive, can't accept any more requests, and will be closed once it has no more outstanding requests.
Definition trunk.h:97
@ TRUNK_CONN_INACTIVE
Connection is inactive and can't accept any more requests.
Definition trunk.h:96
@ TRUNK_CONN_HALTED
Halted, ready to be freed.
Definition trunk.h:88
@ TRUNK_CONN_CLOSED
Connection was closed, either explicitly or due to failure.
Definition trunk.h:94
@ TRUNK_CONN_INIT
In the initial state.
Definition trunk.h:89
@ TRUNK_CONN_DRAINING_TO_FREE
Connection will be closed once it has no more outstanding requests.
Definition trunk.h:103
@ TRUNK_CONN_ACTIVE
Connection is connected and ready to service requests.
Definition trunk.h:91
unsigned req_pool_headers
How many chunk headers the talloc pool allocated with the treq should contain.
Definition trunk.h:266
request_t *_CONST request
The request that we're writing the data on behalf of.
Definition trunk.h:355
fr_time_t _CONST last_open
Last time the connection management function opened a connection.
Definition trunk.h:304
fr_time_delta_t idle_timeout
how long a connection can remain idle for
Definition trunk.h:250
trunk_connection_state_t _CONST state
What state the connection is in.
Definition trunk.h:367
size_t req_pool_size
The size of the talloc pool allocated with the treq.
Definition trunk.h:269
uint64_t max_uses
The maximum number of times a connection can be used.
Definition trunk.h:246
fr_time_delta_t lifetime
Time between reconnects.
Definition trunk.h:248
uint16_t connecting
Maximum number of connections that can be in the connecting state.
Definition trunk.h:233
uint64_t _CONST req_alloc_reused
How many requests were reused.
Definition trunk.h:328
uint32_t max_backlog
Maximum number of requests that can be in the backlog.
Definition trunk.h:244
fr_time_t _CONST last_failed
Last time a connection failed.
Definition trunk.h:312
trunk_request_state_t _CONST state
Which list the request is now located in.
Definition trunk.h:345
fr_time_t _CONST last_write_success
Last time we wrote to the connection.
Definition trunk.h:371
trunk_connection_t *_CONST tconn
Connection this request belongs to.
Definition trunk.h:349
trunk_connection_alloc_t connection_alloc
Allocate a new connection_t.
Definition trunk.h:733
fr_time_t _CONST last_read_success
Last time we read a response.
Definition trunk.h:316
fr_time_t _CONST last_below_target
Last time average utilisation went below the target value.
Definition trunk.h:301
fr_time_t _CONST last_read_success
Last time we read from the connection.
Definition trunk.h:373
fr_time_delta_t close_delay
How long we must be below target utilisation to close an existing connection.
Definition trunk.h:255
uint16_t start
How many connections to start.
Definition trunk.h:227
fr_time_delta_t req_cleanup_delay
How long a request must remain unused in the unassigned (free) list before it's cleaned up and freed.
Definition trunk.h:259
#define TRUNK_REQUEST_STATE_CANCEL_ALL
All requests in various cancellation states.
Definition trunk.h:213
bool always_writable
Set to true if our ability to write requests to a connection handle is not dependent on the state of the underlying connection.
Definition trunk.h:271
trunk_connection_event_t
What type of I/O events the trunk connection is currently interested in receiving.
Definition trunk.h:72
@ TRUNK_CONN_EVENT_BOTH
Trunk should be notified if a connection is readable or writable.
Definition trunk.h:79
@ TRUNK_CONN_EVENT_WRITE
Trunk should be notified if a connection is writable.
Definition trunk.h:77
@ TRUNK_CONN_EVENT_NONE
Don't notify the trunk on connection state changes.
Definition trunk.h:73
@ TRUNK_CONN_EVENT_READ
Trunk should be notified if a connection is readable.
Definition trunk.h:75
#define TRUNK_CONN_ALL
All connection states.
Definition trunk.h:111
fr_heap_cmp_t request_prioritise
Ordering function for requests.
Definition trunk.h:739
uint64_t _CONST req_alloc
The number of requests currently allocated that have not been freed or returned to the free list.
Definition trunk.h:322
trunk_cancel_reason_t
Reasons for a request being cancelled.
Definition trunk.h:55
@ TRUNK_CANCEL_REASON_NONE
Request has not been cancelled.
Definition trunk.h:56
@ TRUNK_CANCEL_REASON_SIGNAL
Request cancelled due to a signal.
Definition trunk.h:57
@ TRUNK_CANCEL_REASON_REQUEUE
A previously sent request is being requeued.
Definition trunk.h:59
@ TRUNK_CANCEL_REASON_MOVE
Request cancelled because it's being moved.
Definition trunk.h:58
uint64_t _CONST req_alloc_new
How many requests we've allocated.
Definition trunk.h:326
fr_time_delta_t open_delay
How long we must be above target utilisation to spawn a new connection.
Definition trunk.h:252
connection_t *_CONST conn
The underlying connection.
Definition trunk.h:369
trunk_state_t
Definition trunk.h:62
@ TRUNK_STATE_MAX
Definition trunk.h:66
@ TRUNK_STATE_PENDING
Trunk has connections, but none are active.
Definition trunk.h:65
@ TRUNK_STATE_ACTIVE
Trunk has active connections.
Definition trunk.h:64
@ TRUNK_STATE_IDLE
Trunk has no connections.
Definition trunk.h:63
fr_time_t _CONST last_closed
Last time the connection management function closed a connection.
Definition trunk.h:307
void(* trunk_watch_t)(trunk_t *trunk, trunk_state_t prev, trunk_state_t state, void *uctx)
Receive a notification when a trunk enters a particular state.
Definition trunk.h:724
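A watch callback matching the trunk_watch_t signature above; the body is illustrative only, and removal is done via trunk_del_watch() listed earlier in this index:
static void my_trunk_watch(trunk_t *trunk, trunk_state_t prev, trunk_state_t state, void *uctx)
{
	(void) trunk; (void) prev; (void) uctx;	/* unused in this sketch */

	switch (state) {
	case TRUNK_STATE_ACTIVE:	/* at least one connection can service requests */
	case TRUNK_STATE_PENDING:	/* connections exist, but none are active */
	case TRUNK_STATE_IDLE:		/* no connections */
	default:
		break;
	}
}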
fr_time_delta_t manage_interval
How often we run the management algorithm to open/close connections.
Definition trunk.h:263
trunk_enqueue_t
Definition trunk.h:148
@ TRUNK_ENQUEUE_DST_UNAVAILABLE
Destination is down.
Definition trunk.h:153
@ TRUNK_ENQUEUE_FAIL
General failure.
Definition trunk.h:154
@ TRUNK_ENQUEUE_OK
Operation was successful.
Definition trunk.h:150
@ TRUNK_ENQUEUE_NO_CAPACITY
At maximum number of connections, and no connection has capacity.
Definition trunk.h:151
@ TRUNK_ENQUEUE_IN_BACKLOG
Request should be enqueued in backlog.
Definition trunk.h:149
void *_CONST preq
Data for the muxer to write to the connection.
Definition trunk.h:351
uint32_t target_req_per_conn
How many pending requests should ideally be running on each connection.
Definition trunk.h:236
fr_time_t _CONST last_connected
Last time a connection connected.
Definition trunk.h:310
trunk_request_cancel_mux_t request_cancel_mux
Write one or more cancellation requests to a connection.
Definition trunk.h:746
trunk_request_state_t
Used for sanity checks and to simplify freeing.
Definition trunk.h:161
@ TRUNK_REQUEST_STATE_PARTIAL
Some of the request was written to the socket, more of it should be written later.
Definition trunk.h:170
@ TRUNK_REQUEST_STATE_REAPABLE
Request has been written, needs to persist, but we are not currently waiting for any response.
Definition trunk.h:173
@ TRUNK_REQUEST_STATE_UNASSIGNED
Transition state - Request currently not assigned to any connection.
Definition trunk.h:165
@ TRUNK_REQUEST_STATE_INIT
Initial state.
Definition trunk.h:162
@ TRUNK_REQUEST_STATE_CANCEL_SENT
We've informed the remote server that the request has been cancelled.
Definition trunk.h:185
@ TRUNK_REQUEST_STATE_COMPLETE
The request is complete.
Definition trunk.h:182
@ TRUNK_REQUEST_STATE_FAILED
The request failed.
Definition trunk.h:183
@ TRUNK_REQUEST_STATE_CANCEL
A request on a particular socket was cancelled.
Definition trunk.h:184
@ TRUNK_REQUEST_STATE_CANCEL_PARTIAL
We partially wrote a cancellation request.
Definition trunk.h:187
@ TRUNK_REQUEST_STATE_BACKLOG
In the backlog.
Definition trunk.h:167
@ TRUNK_REQUEST_STATE_CANCEL_COMPLETE
Remote server has acknowledged our cancellation.
Definition trunk.h:188
@ TRUNK_REQUEST_STATE_PENDING
In the queue of a connection, pending writing.
Definition trunk.h:168
@ TRUNK_REQUEST_STATE_SENT
Was written to a socket. Waiting for a response.
Definition trunk.h:172
trunk_state_t _CONST state
Current state of the trunk.
Definition trunk.h:333
fr_time_t _CONST last_above_target
Last time average utilisation went above the target value.
Definition trunk.h:298
Common configuration parameters for a trunk.
Definition trunk.h:224
Public fields for the trunk connection.
Definition trunk.h:366
I/O functions to pass to trunk_alloc.
Definition trunk.h:732
Public fields for the trunk.
Definition trunk.h:294
Public fields for the trunk request.
Definition trunk.h:344
static fr_event_list_t * el
static fr_slen_t head
Definition xlat.h:416
char const * fr_strerror(void)
Get the last library error.
Definition strerror.c:553
#define fr_box_time_delta(_val)
Definition value.h:354