trunk_tests.c
1#include <freeradius-devel/util/acutest.h>
2#include <freeradius-devel/util/acutest_helpers.h>
3#include <freeradius-devel/util/syserror.h>
4#include <sys/types.h>
5#include <sys/socket.h>
6
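/*
 *	Define TRUNK_TESTS and include trunk.c directly so the tests can reach
 *	trunk internals (trunk->connecting, trunk->active, treq->pub.state, etc.)
 *	that are not exposed through the public header.
 */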
7#define TRUNK_TESTS 1
8#include "trunk.c"
9
10//#include <gperftools/profiler.h>
11typedef struct {
12 trunk_request_t *treq; //!< Trunk request.
13 bool cancelled; //!< Seen by the cancelled callback.
14 bool completed; //!< Seen by the complete callback.
15 bool failed; //!< Seen by the failed callback.
16 bool freed; //!< Seen by the free callback.
17 bool signal_partial; //!< Muxer should signal that this request is partially written.
18 bool signal_cancel_partial; //!< Muxer should signal that this request is partially cancelled.
19 int priority; //!< Priority of request
20} test_proto_request_t;
21
22typedef struct {
23 uint64_t cancelled; //!< Count of tests in this run that were cancelled.
24 uint64_t completed; //!< Count of tests in this run that completed.
25 uint64_t failed; //!< Count of tests in this run that failed.
26 uint64_t freed; //!< Count of tests in this run that were freed.
27} test_proto_stats_t;
28
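/*
 *	Raise the library debug level to its maximum when acutest is run with
 *	high verbosity (-vvv or more).
 */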
29#define DEBUG_LVL_SET if (acutest_verbose_level_ >= 3) fr_debug_lvl = L_DBG_LVL_4 + 1
30
31static void test_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
32{
33 trunk_request_t *treq;
34 size_t count = 0;
35 int fd = *(talloc_get_type_abort(conn->h, int));
36 ssize_t slen;
37
38 while (trunk_connection_pop_request(&treq, tconn) == 0) {
39 test_proto_request_t *preq = treq->pub.preq;
40 count++;
41
42 /*
43 * Simulate a partial write
44 */
45 if (preq && preq->signal_partial) {
46 trunk_request_signal_partial(treq);
47 preq->signal_partial = false;
48 break;
49 }
50
51 if (acutest_verbose_level_ >= 3) printf("%s - Wrote %p\n", __FUNCTION__, preq);
52
53 slen = write(fd, &preq, sizeof(preq));
54 if (slen < 0) return;
55 if (slen == 0) return;
56 if (slen < (ssize_t)sizeof(preq)) abort();
57
58 trunk_request_signal_sent(treq);
59 }
60 TEST_CHECK(count > 0);
61}
62
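/** Cancellation muxer, pops cancellations off a connection and writes them to the socket pair
 *
 */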
63static void test_cancel_mux(UNUSED fr_event_list_t *el, trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
64{
65 trunk_request_t *treq;
66 size_t count = 0;
67 int fd = *(talloc_get_type_abort(conn->h, int));
68 ssize_t slen;
69
70 /*
71 * For cancellation we just write the preq pointer out again, the same as the request muxer does.
72 */
73 while ((trunk_connection_pop_cancellation(&treq, tconn) == 0)) {
74 test_proto_request_t *preq = treq->pub.preq;
75 count++;
76
77 /*
78 * Simulate a partial cancel write
79 */
80 if (preq && preq->signal_cancel_partial) {
81 trunk_request_signal_cancel_partial(treq);
82 preq->signal_cancel_partial = false;
83 break;
84 }
85
86 if (acutest_verbose_level_ >= 3) printf("%s - Wrote %p\n", __FUNCTION__, preq);
87 slen = write(fd, &preq, sizeof(preq));
88 if (slen < 0) {
89 fr_perror("%s - %s", __FUNCTION__, fr_syserror(errno));
90 return;
91 }
92 if (slen == 0) return;
93 if (slen < (ssize_t)sizeof(preq)) abort();
94
95 trunk_request_signal_cancel_sent(treq);
96 }
97 TEST_CHECK(count > 0);
98}
99
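/** Demuxer, reads looped-back preq pointers and signals the trunk that the request (or its cancellation) completed
 *
 */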
100static void test_demux(UNUSED fr_event_list_t *el, UNUSED trunk_connection_t *tconn, connection_t *conn, UNUSED void *uctx)
101{
102 int fd = *(talloc_get_type_abort(conn->h, int));
103 test_proto_request_t *preq;
104 ssize_t slen;
105
106 for (;;) {
107 slen = read(fd, &preq, sizeof(preq));
108 if (slen <= 0) break;
109
110 if (acutest_verbose_level_ >= 3) printf("%s - Read %p (%zu)\n", __FUNCTION__, preq, (size_t)slen);
111
112 /*
113 * Coverity considers data read from a file to be tainted,
114 * and considers its use to be a defect--but almost all the
115 * rest of the loop validates the pointer to the extent
116 * possible--all of the pointer should be read, its talloc
117 * "dynamic type" had better be right, and it should either
118 * be freed or have a state the demuxer can handle or ignore.
119 * This isn't like a range check on a numeric value;
120 * Coverity doesn't recognize it as validation.
121 */
122 TEST_CHECK(slen == sizeof(preq));
123 talloc_get_type_abort(preq, test_proto_request_t);
124
125 if (preq->freed) continue;
126
127 /*
128 * Demuxer can handle both normal requests and cancelled ones
129 */
130 switch (preq->treq->pub.state) {
131 case TRUNK_REQUEST_STATE_CANCEL:
132 break; /* Hack - just ignore it */
133
134 case TRUNK_REQUEST_STATE_CANCEL_SENT:
135 /* coverity[tainted_data] */
136 trunk_request_signal_cancel_complete(preq->treq);
137 break;
138
139 case TRUNK_REQUEST_STATE_SENT:
140 /* coverity[tainted_data] */
141 trunk_request_signal_complete(preq->treq);
142 break;
143
144 default:
145 fr_assert(0);
146 break;
147 }
148 }
149}
150
151static void _conn_io_error(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags,
152 UNUSED int fd_errno, void *uctx)
153{
154
155 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
156
157 trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED);
158}
159
160static void _conn_io_read(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
161{
162 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
163 trunk_connection_signal_readable(tconn);
164}
165
166static void _conn_io_write(UNUSED fr_event_list_t *el, UNUSED int fd, UNUSED int flags, void *uctx)
167{
168 trunk_connection_t *tconn = talloc_get_type_abort(uctx, trunk_connection_t);
169 trunk_connection_signal_writable(tconn);
170}
171
172static void _conn_notify(trunk_connection_t *tconn, connection_t *conn,
173 fr_event_list_t *el,
174 trunk_connection_event_t notify_on, UNUSED void *uctx)
175{
176 int fd = *(talloc_get_type_abort(conn->h, int));
177
178 switch (notify_on) {
179 case TRUNK_CONN_EVENT_NONE:
180 TEST_CHECK(fr_event_fd_delete(el, fd, FR_EVENT_FILTER_IO) == 0);
181 break;
182
183 case TRUNK_CONN_EVENT_READ:
184 TEST_CHECK(fr_event_fd_insert(conn, NULL, el, fd, _conn_io_read, NULL, _conn_io_error, tconn) == 0);
185 break;
186
187 case TRUNK_CONN_EVENT_WRITE:
188 TEST_CHECK(fr_event_fd_insert(conn, NULL, el, fd, NULL, _conn_io_write, _conn_io_error, tconn) == 0);
189 break;
190
191 case TRUNK_CONN_EVENT_BOTH:
192 TEST_CHECK(fr_event_fd_insert(conn, NULL, el, fd, _conn_io_read, _conn_io_write, _conn_io_error, tconn) == 0);
193 break;
194
195 default:
196 fr_assert(0);
197 }
198}
199
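/*
 *	Request lifecycle callbacks.  Each records the outcome on the test_proto_request_t
 *	and, when a test_proto_stats_t was passed as uctx, bumps the matching counter.
 */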
200static void test_request_cancel(UNUSED connection_t *conn, void *preq,
201 UNUSED trunk_cancel_reason_t reason, void *uctx)
202{
203 test_proto_stats_t *stats = uctx;
204 test_proto_request_t *our_preq;
205
206 if (!preq) return;
207
208 our_preq = talloc_get_type_abort(preq, test_proto_request_t);
209 our_preq->cancelled = true;
210 if (stats) stats->cancelled++;
211}
212
213static void test_request_complete(UNUSED request_t *request, void *preq, UNUSED void *rctx, void *uctx)
214{
215 test_proto_stats_t *stats = uctx;
216 test_proto_request_t *our_preq;
217
218 if (!preq) return;
219
220 our_preq = talloc_get_type_abort(preq, test_proto_request_t);
221 our_preq->completed = true;
222 if (stats) stats->completed++;
223}
224
225static void test_request_fail(UNUSED request_t *request, void *preq, UNUSED void *rctx, UNUSED trunk_request_state_t state, void *uctx)
226{
227 test_proto_stats_t *stats = uctx;
228 test_proto_request_t *our_preq;
229
230 if (!preq) return;
231
232 our_preq = talloc_get_type_abort(preq, test_proto_request_t);
233 our_preq->failed = true;
234 if (stats) stats->failed++;
235}
236
237static void test_request_free(UNUSED request_t *request, void *preq, void *uctx)
238{
239 test_proto_stats_t *stats = uctx;
240 test_proto_request_t *our_preq;
241
242 if (!preq) return;
243
244 our_preq = talloc_get_type_abort(preq, test_proto_request_t);
245 our_preq->freed = true;
246 if (stats) stats->freed++;
247}
248
249/** Whenever the second socket in a socket pair is readable, read all pending data, and write it back
250 *
251 */
252static void _conn_io_loopback(UNUSED fr_event_list_t *el, int fd, UNUSED int flags, void *uctx)
253{
254 int *our_h = talloc_get_type_abort(uctx, int);
255 static uint8_t buff[1024];
256 static size_t to_write;
257 ssize_t slen;
258
259 fr_assert(fd == our_h[1]);
260
261 while (true) {
262 slen = read(fd, buff, sizeof(buff));
263 if (slen <= 0) return;
264
265 to_write = (size_t)slen;
266
267 if (acutest_verbose_level_ >= 3) printf("%s - Read %zu bytes of data\n", __FUNCTION__, (size_t)slen);
268 slen = write(our_h[1], buff, (size_t)to_write);
269 if (slen < 0) return;
270
271 if (slen < (ssize_t)to_write) {
272 to_write -= slen;
273 if (acutest_verbose_level_ >= 3) {
274 printf("%s - Partial write %zu bytes left\n", __FUNCTION__, to_write);
275 }
276 return;
277 } else {
278 if (acutest_verbose_level_ >= 3) printf("%s - Wrote %zu bytes of data\n", __FUNCTION__, (size_t)slen);
279 }
280 }
281}
282
283static void _conn_close(UNUSED fr_event_list_t *el, void *h, UNUSED void *uctx)
284{
285 int *our_h = talloc_get_type_abort(h, int);
286
287 talloc_free_children(our_h); /* Clear the IO handlers */
288
289 close(our_h[0]);
290 close(our_h[1]);
291
292 talloc_free(our_h);
293}
294
295/** Insert I/O handlers that loop any data back round
296 *
297 */
298static connection_state_t _conn_open(fr_event_list_t *el, void *h, UNUSED void *uctx)
299{
300 int *our_h = talloc_get_type_abort(h, int);
301
302 /*
303 * This always needs to be inserted
304 */
305 TEST_CHECK(fr_event_fd_insert(our_h, NULL, el, our_h[1], _conn_io_loopback, NULL, NULL, our_h) == 0);
306
307 return CONNECTION_STATE_CONNECTED;
308}
309
310/** Allocate a basic socket pair
311 *
312 */
313CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function*/
314static connection_state_t _conn_init(void **h_out, connection_t *conn, UNUSED void *uctx)
315{
316 int *h;
317
318 h = talloc_array(conn, int, 2);
319 socketpair(AF_UNIX, SOCK_STREAM, 0, h);
320
321 fr_nonblock(h[0]);
322 fr_nonblock(h[1]);
323 connection_signal_on_fd(conn, h[0]);
324 *h_out = h;
325
326 return CONNECTION_STATE_CONNECTING;
327}
328
329static connection_t *test_setup_socket_pair_connection_alloc(trunk_connection_t *tconn,
330 fr_event_list_t *el,
331 connection_conf_t const *conn_conf,
332 char const *log_prefix, UNUSED void *uctx)
333{
334 connection_conf_t cstat;
335
336 if (!conn_conf) {
337 memset(&cstat, 0, sizeof(cstat));
338 conn_conf = &cstat;
339 }
340 return connection_alloc(tconn, el,
341 &(connection_funcs_t){
342 .init = _conn_init,
343 .open = _conn_open,
344 .close = _conn_close
345 },
346 conn_conf,
347 log_prefix, tconn);
348}
349
350static int8_t test_preq_cmp(void const *a, void const *b)
351{
352 test_proto_request_t const *preq_a = a;
353 test_proto_request_t const *preq_b = b;
354 return CMP(preq_a->priority, preq_b->priority);
355}
356
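/** Allocate a trunk wired to the socket-pair connection and the test muxer/demuxer callbacks above
 *
 */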
357static trunk_t *test_setup_trunk(TALLOC_CTX *ctx, fr_event_list_t *el, trunk_conf_t *conf, bool with_cancel_mux, void *uctx)
358{
361 .connection_notify = _conn_notify,
362 .request_prioritise = test_preq_cmp,
363 .request_mux = test_mux,
364 .request_demux = test_demux,
365 .request_cancel = test_request_cancel,
366 .request_complete = test_request_complete,
367 .request_fail = test_request_fail,
368 .request_free = test_request_free
369 };
370
371 /*
372 * Function list is copied, so this is OK.
373 */
374 if (with_cancel_mux) io_funcs.request_cancel_mux = test_cancel_mux;
375
376 return trunk_alloc(ctx, el, &io_funcs, conf, "test_socket_pair", uctx, false);
377}
378
379static void test_socket_pair_alloc_then_free(void)
380{
381 TALLOC_CTX *ctx = talloc_init_const("test");
382 trunk_t *trunk;
384 int events;
385
387 .start = 2,
388 .min = 2
389 };
392 .request_prioritise = fr_pointer_cmp,
393 };
394
396
397 el = fr_event_list_alloc(ctx, NULL, NULL);
398
400
401 trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
402 TEST_CHECK(trunk != NULL);
403 if (!trunk) return;
404
407 TEST_CHECK(events == 2); /* Two I/O write events, no timers */
410
412 TEST_CHECK(events == 0); /* I/O events should have been cleared */
413
414 talloc_free(trunk);
415 talloc_free(ctx);
416}
417
418static void test_socket_pair_alloc_then_reconnect_then_free(void)
419{
420 TALLOC_CTX *ctx = talloc_init_const("test");
421 trunk_t *trunk;
423 int events;
425 .start = 2,
426 .min = 2,
427 .conn_conf = &(connection_conf_t){
428 .reconnection_delay = fr_time_delta_from_nsec(NSEC / 2)
429 }
430 };
433 .request_prioritise = fr_pointer_cmp,
434 };
436
437 el = fr_event_list_alloc(ctx, NULL, NULL);
438
439 if (!el) return;
440
442
443 trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
444 TEST_CHECK(trunk != NULL);
445 if (!trunk) return;
446
448 TEST_CHECK(events == 2); /* Two I/O write events, no timers */
452
454 TEST_CHECK(events == 0); /* I/O events should have been cleared */
455 TEST_MSG("Got %u events", events);
456
458
461 TEST_CHECK(events == 1); /* Two timer events but the event loop only adds one to the total */
462 TEST_MSG("Got %u events", events);
464
466
468 TEST_CHECK(events == 2); /* Two I/O write events, no timers */
470
473 TEST_CHECK(events == 0); /* I/O events should have been cleared */
474
475 talloc_free(trunk);
476 talloc_free(ctx);
477}
478
479CC_NO_UBSAN(function) /* UBSAN: false positive - public vs private connection_t trips --fsanitize=function*/
480static connection_state_t _conn_init_no_signal(void **h_out, connection_t *conn, UNUSED void *uctx)
481{
482 int *h;
483
484 h = talloc_array(conn, int, 2);
485 socketpair(AF_UNIX, SOCK_STREAM, 0, h);
486 *h_out = h;
487
488 return CONNECTION_STATE_CONNECTING;
489}
490
491static connection_t *test_setup_socket_pair_1s_timeout_connection_alloc(trunk_connection_t *tconn,
492 fr_event_list_t *el,
493 UNUSED connection_conf_t const *conf,
494 char const *log_prefix, void *uctx)
495{
496 return connection_alloc(tconn, el,
497 &(connection_funcs_t){
498 .init = _conn_init_no_signal,
499 .open = _conn_open,
500 .close = _conn_close
501 },
502 &(connection_conf_t){
503 .connection_timeout = fr_time_delta_from_sec(1),
504 .reconnection_delay = fr_time_delta_from_sec(1)
505 },
506 log_prefix, uctx);
507}
508
509static void test_socket_pair_alloc_then_connect_timeout(void)
510{
511 TALLOC_CTX *ctx = talloc_init_const("test");
512 trunk_t *trunk;
514 int events;
515 trunk_connection_t *tconn;
517 .start = 1,
518 .min = 1
519 };
522 .request_prioritise = fr_pointer_cmp,
523 };
524
526
527 el = fr_event_list_alloc(ctx, NULL, NULL);
528
530
531
532 trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
533 TEST_CHECK(trunk != NULL);
534 if (!trunk) return;
535
536 /*
537 * Trigger connection timeout
538 */
540 TEST_CHECK(fr_event_list_num_timers(el) == 1); /* One timer event for the connection timeout */
542 TEST_CHECK(events == 1); /* We didn't install the I/O events */
543
544 tconn = fr_dlist_head(&trunk->connecting);
545 TEST_CHECK(tconn != NULL);
546 if (tconn == NULL) return;
547
550
551 /*
552 * Timeout should now fire
553 */
555
556 /*
557 * Connection delay not implemented for timed out connections
558 */
561
563 TEST_CHECK(events == 0); /* I/O events should have been cleared */
564
565 talloc_free(trunk);
566 talloc_free(ctx);
567}
568
569static connection_t *test_setup_socket_pair_1s_reconnection_delay_alloc(trunk_connection_t *tconn,
570 fr_event_list_t *el,
571 UNUSED connection_conf_t const *conn_conf,
572 char const *log_prefix, void *uctx)
573{
574 return connection_alloc(tconn, el,
575 &(connection_funcs_t){
576 .init = _conn_init,
577 .open = _conn_open,
578 .close = _conn_close
579 },
580 &(connection_conf_t){
581 .connection_timeout = fr_time_delta_from_sec(1),
582 .reconnection_delay = fr_time_delta_from_sec(1)
583 },
584 log_prefix, uctx);
585}
586
587static void test_socket_pair_alloc_then_reconnect_check_delay(void)
588{
589 TALLOC_CTX *ctx = talloc_init_const("test");
590 trunk_t *trunk;
592 int events;
593 trunk_connection_t *tconn;
595 .start = 1,
596 .min = 1,
597 .conn_conf = &(connection_conf_t){
598 .reconnection_delay = fr_time_delta_from_sec(1),
599 .connection_timeout = fr_time_delta_from_sec(1)
600 }
601 };
604 .request_prioritise = fr_pointer_cmp,
605 };
606
608
609 el = fr_event_list_alloc(ctx, NULL, NULL);
611
612 trunk = trunk_alloc(ctx, el, &io_funcs, &conf, "test_socket_pair", NULL, false);
613 TEST_CHECK(trunk != NULL);
614 if (!trunk) return;
615
616 /*
617 * Trigger connection timeout
618 */
620 TEST_CHECK(fr_event_list_num_timers(el) == 1); /* One timer event for the connection timeout */
622 TEST_CHECK(events == 2); /* We didn't install the I/O events */
624
625 tconn = fr_minmax_heap_min_peek(trunk->active);
626 TEST_CHECK(tconn != NULL);
627 if (tconn == NULL) return;
628
631
632 /*
633 * Trigger reconnection
634 */
637
639 TEST_CHECK(events == 0); /* Reconnect delay not ready to fire yet, no I/O handlers installed */
640 TEST_CHECK(fr_event_list_num_timers(el) == 1); /* One timer event for reconnect delay */
641
644 TEST_CHECK(events == 1); /* Reconnect delay should now be ready to fire */
645
646 fr_event_service(el); /* Services the timer, which then triggers init */
647
650
652 TEST_CHECK(events == 1); /* Should have a pending I/O event and a timer */
653
654 talloc_free(trunk);
655 talloc_free(ctx);
656}
657
658/*
659 * Test basic enqueue and dequeue
660 */
661static void test_enqueue_basic(void)
662{
663 TALLOC_CTX *ctx = talloc_init_const("test");
664 trunk_t *trunk;
667 .start = 1,
668 .min = 1,
669 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
670 };
672 trunk_request_t *treq = NULL;
673 trunk_enqueue_t rcode;
674
676
677 el = fr_event_list_alloc(ctx, NULL, NULL);
679
680 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
681
682 /*
683 * Our preq is a pointer to the trunk
684 * request so we don't have to manage
685 * a tree of requests and responses.
686 */
687 preq = talloc_zero(NULL, test_proto_request_t);
688
689 /*
690 * The trunk is active, but there are no
691 * connections.
692 *
693 * We're under the current request limit
694 * so the request should enter the
695 * backlog.
696 */
697 rcode = trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
698 preq->treq = treq;
700
702
703 /*
704 * Allow the connection to establish
705 */
708
711
712 /*
713 * Should now be active and have a write event
714 * inserted into the event loop.
715 */
717
718 /*
719 * Trunk should be signalled the connection is
720 * writable.
721 *
722 * We should then:
723 * - Pop a request from the pending queue.
724 * - Write the request to the socket pair
725 */
728
730
731 /*
732 * Gives the loopback function a chance
733 * to read the data, and write it back.
734 */
737
738 /*
739 * Trunk should be signalled the connection is
740 * readable.
741 *
742 * We should then:
743 * - Read the (looped back) response.
744 * - Signal the trunk that the connection is readable.
745 */
748
749 TEST_CHECK(preq->completed == true);
750 TEST_CHECK(preq->failed == false);
751 TEST_CHECK(preq->cancelled == false);
752 TEST_CHECK(preq->freed == true);
753 talloc_free(preq);
754
755 talloc_free(trunk);
756 talloc_free(ctx);
757}
758
759/*
760 * Test request cancellations when the connection is in various states
761 */
762static void test_enqueue_cancellation_points(void)
763{
764 TALLOC_CTX *ctx = talloc_init_const("test");
765 trunk_t *trunk;
768 .start = 1,
769 .min = 1,
770 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
771 };
773 trunk_request_t *treq = NULL;
774
776
777 el = fr_event_list_alloc(ctx, NULL, NULL);
779
780 trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
781 preq = talloc_zero(NULL, test_proto_request_t);
782 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
783
784 TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_BACKLOG");
785 talloc_free(trunk);
786 TEST_CHECK(preq->completed == false);
787 TEST_CHECK(preq->failed == true);
788 TEST_CHECK(preq->cancelled == false);
789 TEST_CHECK(preq->freed == true);
790 talloc_free(preq);
791
792 TEST_CASE("cancellation via signal - TRUNK_REQUEST_STATE_BACKLOG");
793 trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
794 preq = talloc_zero(NULL, test_proto_request_t);
795 treq = NULL;
796 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
797 preq->treq = treq;
800
801 TEST_CHECK(preq->completed == false);
802 TEST_CHECK(preq->failed == false); /* Request/rctx not guaranteed after signal, so can't call fail */
803 TEST_CHECK(preq->cancelled == false);
804 TEST_CHECK(preq->freed == true);
805 talloc_free(preq);
806 talloc_free(trunk);
807
808 TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_PARTIAL");
809 trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
810 preq = talloc_zero(NULL, test_proto_request_t);
811 preq->signal_partial = true;
812 treq = NULL;
813 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
814 preq->treq = treq;
815
816 fr_event_corral(el, test_time_base, false); /* Connect the connection */
818
819 fr_event_corral(el, test_time_base, false); /* Send the request */
821
823
824 talloc_free(trunk);
825
826 TEST_CHECK(preq->completed == false);
827 TEST_CHECK(preq->failed == true);
828 TEST_CHECK(preq->cancelled == true);
829 TEST_CHECK(preq->freed == true);
830 talloc_free(preq);
831
832 TEST_CASE("cancellation via signal - TRUNK_REQUEST_STATE_PARTIAL");
833 trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
834 preq = talloc_zero(NULL, test_proto_request_t);
835 preq->signal_partial = true;
836 treq = NULL;
837 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
838 preq->treq = treq;
839
840 fr_event_corral(el, test_time_base, false); /* Connect the connection */
842
843 fr_event_corral(el, test_time_base, false); /* Send the request */
845
849
850 TEST_CHECK(preq->completed == false);
851 TEST_CHECK(preq->failed == false); /* Request/rctx not guaranteed after signal, so can't call fail */
852 TEST_CHECK(preq->cancelled == true);
853 TEST_CHECK(preq->freed == true);
854 talloc_free(preq);
855 talloc_free(trunk);
856
857 TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_SENT");
858 trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
859 preq = talloc_zero(NULL, test_proto_request_t);
860 treq = NULL;
861 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
862 preq->treq = treq;
863
864 fr_event_corral(el, test_time_base, false); /* Connect the connection */
866
867 fr_event_corral(el, test_time_base, false); /* Send the request */
869
871 talloc_free(trunk);
872
873 TEST_CHECK(preq->completed == false);
874 TEST_CHECK(preq->failed == true);
875 TEST_CHECK(preq->cancelled == true);
876 TEST_CHECK(preq->freed == true);
877 talloc_free(preq);
878
879 TEST_CASE("cancellation via signal - TRUNK_REQUEST_STATE_SENT");
880 trunk = test_setup_trunk(ctx, el, &conf, false, NULL);
881 preq = talloc_zero(NULL, test_proto_request_t);
882 treq = NULL;
883 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
884 preq->treq = treq;
885
886 fr_event_corral(el, test_time_base, false); /* Connect the connection */
888
889 fr_event_corral(el, test_time_base, false); /* Send the request */
891
895
896 TEST_CHECK(preq->completed == false);
897 TEST_CHECK(preq->failed == false); /* Request/rctx not guaranteed after signal, so can't call fail */
898 TEST_CHECK(preq->cancelled == true);
899 TEST_CHECK(preq->freed == true);
900 talloc_free(preq);
901 talloc_free(trunk);
902
903 TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_CANCEL_PARTIAL");
904 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
905 preq = talloc_zero(NULL, test_proto_request_t);
906 preq->signal_cancel_partial = true;
907 treq = NULL;
908 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
909 preq->treq = treq;
910
911 fr_event_corral(el, test_time_base, false); /* Connect the connection */
913
914 fr_event_corral(el, test_time_base, false); /* Send the request */
916
920
921 fr_event_corral(el, test_time_base, false); /* Send the cancellation request */
923
925
926 talloc_free(trunk);
927
928 TEST_CHECK(preq->completed == false);
929 TEST_CHECK(preq->failed == false);
930 TEST_CHECK(preq->cancelled == true);
931 TEST_CHECK(preq->freed == true);
932 talloc_free(preq);
933
934 TEST_CASE("cancellation via trunk free - TRUNK_REQUEST_STATE_CANCEL_SENT");
935 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
936 preq = talloc_zero(NULL, test_proto_request_t);
937 treq = NULL;
938 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
939 preq->treq = treq;
940
941 fr_event_corral(el, test_time_base, false); /* Connect the connection */
943
944 fr_event_corral(el, test_time_base, false); /* Send the request */
946
950
951 fr_event_corral(el, test_time_base, false); /* Send the cancellation request */
953
955
956 talloc_free(trunk);
957
958 TEST_CHECK(preq->completed == false);
959 TEST_CHECK(preq->failed == false);
960 TEST_CHECK(preq->cancelled == true);
961 TEST_CHECK(preq->freed == true);
962 talloc_free(preq);
963
964 TEST_CASE("trunk free after TRUNK_REQUEST_STATE_CANCEL_COMPLETE");
965 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
966 preq = talloc_zero(NULL, test_proto_request_t);
967 treq = NULL;
968 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
969 preq->treq = treq;
970
971 fr_event_corral(el, test_time_base, false); /* Connect the connection */
973
974 fr_event_corral(el, test_time_base, false); /* Send the request */
976
980
981 fr_event_corral(el, test_time_base, false); /* Send the cancellation request */
983
985
986 fr_event_corral(el, test_time_base, false); /* Loop the cancel request back round */
988
989 fr_event_corral(el, test_time_base, false); /* Read the cancel ACK (such that it is) */
991
993
994 talloc_free(trunk);
995
996 TEST_CHECK(preq->completed == false);
997 TEST_CHECK(preq->failed == false);
998 TEST_CHECK(preq->cancelled == true);
999 TEST_CHECK(preq->freed == true);
1000 talloc_free(preq);
1001
1002
1003 talloc_free(ctx);
1004}
1005
1006/*
1007 * Test PARTIAL -> SENT and CANCEL-PARTIAL -> CANCEL-SENT
1008 */
1009static void test_partial_to_complete_states(void)
1010{
1011 TALLOC_CTX *ctx = talloc_init_const("test");
1012 trunk_t *trunk;
1014 trunk_conf_t conf = {
1015 .start = 1,
1016 .min = 1,
1017 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
1018 };
1020 trunk_request_t *treq = NULL;
1021
1023
1024 el = fr_event_list_alloc(ctx, NULL, NULL);
1026
1027 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
1028 preq = talloc_zero(NULL, test_proto_request_t);
1029 preq->signal_partial = true;
1030 preq->signal_cancel_partial = true;
1031
1032 TEST_CASE("TRUNK_REQUEST_STATE_PARTIAL -> TRUNK_REQUEST_STATE_SENT");
1033
1034 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
1035 preq->treq = treq;
1036
1037 fr_event_corral(el, test_time_base, false); /* Connect the connection */
1039
1040 fr_event_corral(el, test_time_base, false); /* Send the request */
1042
1044
1045 fr_event_corral(el, test_time_base, false); /* Complete the partial request */
1047
1049
1052
1053 TEST_CASE("TRUNK_REQUEST_STATE_CANCEL_PARTIAL -> TRUNK_REQUEST_STATE_CANCEL_SENT");
1054
1055 fr_event_corral(el, test_time_base, false); /* Send partial cancel request */
1057
1059
1060 fr_event_corral(el, test_time_base, false); /* Complete the partial cancellation */
1062
1064
1065 fr_event_corral(el, test_time_base, false); /* Loop the cancellation request back */
1067
1069
1070 talloc_free(trunk);
1071
1072 TEST_CHECK(preq->completed == false);
1073 TEST_CHECK(preq->failed == false);
1074 TEST_CHECK(preq->cancelled == true);
1075 TEST_CHECK(preq->freed == true);
1076 talloc_free(preq);
1077
1078 talloc_free(ctx);
1079}
1080
1081/*
1082 * Test calling reconnect with requests in each different state
1083 */
1084static void test_requeue_on_reconnect(void)
1085{
1086 TALLOC_CTX *ctx = talloc_init_const("test");
1087 trunk_t *trunk;
1089 trunk_conf_t conf = {
1090 .start = 2,
1091 .min = 2,
1092 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5),
1093 .conn_conf = &(connection_conf_t){
1094 .reconnection_delay = fr_time_delta_from_nsec(NSEC / 10)
1095 },
1096 .backlog_on_failed_conn = true
1097 };
1099 trunk_request_t *treq = NULL;
1100 trunk_connection_t *tconn;
1101
1104
1105 el = fr_event_list_alloc(ctx, NULL, NULL);
1107
1108 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
1109 preq = talloc_zero(ctx, test_proto_request_t);
1110 preq->signal_partial = true;
1111 preq->signal_cancel_partial = true;
1112
1113 fr_event_corral(el, test_time_base, false); /* Connect the connection(s) */
1115
1116 TEST_CASE("dequeue on reconnect - TRUNK_REQUEST_STATE_PENDING");
1117
1119
1120 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
1121 preq->treq = treq;
1122
1123 tconn = treq->pub.tconn; /* Store the conn the request was assigned to */
1125
1127
1128 /*
1129 * Should be reassigned to the other connection
1130 */
1131 TEST_CHECK(tconn != treq->pub.tconn);
1133
1134 /*
1135 * Should be reassigned to the backlog
1136 */
1139 TEST_CHECK(!treq->pub.tconn);
1140
1141 TEST_CASE("cancel on reconnect - TRUNK_REQUEST_STATE_PARTIAL");
1142
1143 /*
1144 * Allow the connections to reconnect
1145 */
1148 fr_event_service(el); /* run management function */
1150 fr_event_service(el); /* service any I/O callbacks */
1151
1152 /*
1153 * Request should now be assigned back to one of the reconnected
1154 * connections.
1155 */
1157 TEST_CHECK(treq->pub.tconn != NULL);
1158
1160 fr_event_corral(el, test_time_base, false); /* Send the request (partially) */
1162
1164
1165 /*
1166 * Reconnect the connection.
1167 *
1168 * preq should pass through the cancel function,
1169 * then be re-assigned.
1170 */
1171 tconn = treq->pub.tconn;
1173
1174 TEST_CHECK(preq->completed == false);
1175 TEST_CHECK(preq->failed == false);
1176 TEST_CHECK(preq->cancelled == true);
1177 TEST_CHECK(preq->freed == false);
1178
1179 preq->cancelled = false; /* Reset */
1180
1182 TEST_CHECK(tconn != treq->pub.tconn); /* Ensure it moved */
1183
1184 TEST_CASE("cancel on reconnect - TRUNK_REQUEST_STATE_SENT");
1185
1186 /*
1187 * Send the request (fully)
1188 */
1189 fr_event_corral(el, test_time_base, false); /* Send the request (partially) */
1191
1192 /*
1193 * The corral above only writes the request partially;
1194 * a second pass is needed to send it fully, as shown by
1195 * the following check, which fails without it.
1196 */
1197 fr_event_corral(el, test_time_base, false); /* Send the rest of the request */
1200
1201 tconn = treq->pub.tconn;
1203
1205
1206 /*
1207 * Allow the connections to reconnect
1208 * and send the request.
1209 */
1213 TEST_CHECK(tconn != treq->pub.tconn); /* Ensure it moved */
1214
1215 TEST_CHECK(preq->completed == false);
1216 TEST_CHECK(preq->failed == false);
1217 TEST_CHECK(preq->cancelled == true);
1218 TEST_CHECK(preq->freed == false);
1219
1220 preq->cancelled = false; /* Reset */
1221
1222 TEST_CASE("free on reconnect - TRUNK_REQUEST_STATE_CANCEL");
1223
1224 /*
1225 * Signal the request should be cancelled
1226 */
1229
1230 /*
1231 * Requests in the cancel state are
1232 * freed instead of being moved between
1233 * connections.
1234 */
1235 trunk_connection_signal_reconnect(tconn, CONNECTION_FAILED); /* treq->pub.tconn, now invalid due to cancel */
1236
1240
1244
1248
1249 TEST_CHECK(preq->completed == false);
1250 TEST_CHECK(preq->failed == false);
1251 TEST_CHECK(preq->cancelled == true);
1252 TEST_CHECK(preq->freed == true);
1253
1254 /*
1255 * Allow the connection we just reconnected
1256 * to open so it doesn't interfere with
1257 * the next test.
1258 */
1262
1263 TEST_CASE("free on reconnect - TRUNK_REQUEST_STATE_CANCEL_PARTIAL");
1264
1265 /*
1266 * Queue up a new request, and get it to the cancel-partial state.
1267 */
1268 preq = talloc_zero(ctx, test_proto_request_t);
1269 preq->signal_cancel_partial = true;
1270 treq = NULL;
1271 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
1272 preq->treq = treq;
1273
1275
1276 /*
1277 * Send the request (fully)
1278 */
1280 fr_event_corral(el, test_time_base, false); /* Send the request (fully) */
1282
1284 trunk_request_signal_cancel(treq); /* Cancel the request */
1285
1287
1288 /*
1289 * Transition to cancel partial
1290 */
1294
1296
1297 /*
1298 * Trigger a reconnection
1299 */
1301
1305
1306 TEST_CHECK(preq->completed == false);
1307 TEST_CHECK(preq->failed == false);
1308 TEST_CHECK(preq->cancelled == true);
1309 TEST_CHECK(preq->freed == true);
1310
1311 /*
1312 * Allow the connection we just reconnected
1313 * to open so it doesn't interfere with
1314 * the next test.
1315 */
1319
1320 TEST_CASE("free on reconnect - TRUNK_REQUEST_STATE_CANCEL_SENT");
1321
1322 /*
1323 * Queue up a new request, and get it to the cancel-sent state.
1324 */
1325 preq = talloc_zero(NULL, test_proto_request_t);
1326 treq = NULL;
1327 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
1328 preq->treq = treq;
1329
1331
1332 /*
1333 * Sent the request (fully)
1334 */
1336 fr_event_corral(el, test_time_base, false); /* Send the request (fully) */
1338
1340 trunk_request_signal_cancel(treq); /* Cancel the request */
1341
1343
1344 /*
1345 * Transition to cancel
1346 */
1350
1352
1353 /*
1354 * Trigger a reconnection
1355 */
1357
1361
1362 TEST_CHECK(preq->completed == false);
1363 TEST_CHECK(preq->failed == false);
1364 TEST_CHECK(preq->cancelled == true);
1365 TEST_CHECK(preq->freed == true);
1366
1367 talloc_free(preq);
1368
1369 talloc_free(ctx);
1370}
1371
1372static void test_connection_start_on_enqueue(void)
1373{
1374 TALLOC_CTX *ctx = talloc_init_const("test");
1375 trunk_t *trunk;
1377 trunk_conf_t conf = {
1378 .start = 0,
1379 .min = 0, /* No connections on start */
1380 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
1381 };
1383 trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
1384
1386
1387 el = fr_event_list_alloc(ctx, NULL, NULL);
1389
1390 /* Need to provide a timer starting value above zero */
1392
1393 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
1394 preq = talloc_zero(NULL, test_proto_request_t);
1395
1396 TEST_CASE("C0 - Enqueue should spawn");
1397 trunk_request_enqueue(&treq_a, trunk, NULL, preq, NULL);
1398
1399 /*
1400 * This causes the event associated with the request left on
1401 * the backlog queue to be handled, which (along with the other
1402 * corral/service sequence) makes the checks all pass.
1403 */
1406
1408
1409 TEST_CASE("C1 connecting, !max_req_per_conn - Enqueue MUST NOT spawn");
1410 trunk_request_enqueue(&treq_b, trunk, NULL, preq, NULL);
1411
1413
1414 /*
1415 * Allow the connections to open
1416 */
1419
1421
1422 TEST_CASE("C1 active, !max_req_per_conn - Enqueue MUST NOT spawn");
1423 trunk_request_enqueue(&treq_c, trunk, NULL, preq, NULL);
1424
1427
1428 talloc_free(ctx);
1429 talloc_free(preq);
1430}
1431
1432static void test_connection_rebalance_requests(void)
1433{
1434 TALLOC_CTX *ctx = talloc_init_const("test");
1435 trunk_t *trunk;
1437 trunk_conf_t conf = {
1438 .start = 2,
1439 .min = 2, /* Two connections from the start */
1440 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
1441 };
1443 trunk_connection_t *tconn;
1444 trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
1445
1447
1448 el = fr_event_list_alloc(ctx, NULL, NULL);
1450
1451 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
1452 preq = talloc_zero(NULL, test_proto_request_t);
1453 printf("Rebalance %p\n", preq);
1454
1455 /*
1456 * Allow the connections to open
1457 */
1460
1461 /*
1462 * Mark one of the connections as full, and
1463 * enqueue three requests on the other.
1464 */
1465 tconn = fr_minmax_heap_min_peek(trunk->active);
1466
1467 TEST_CASE("C2 connected, R0 - Signal inactive");
1469
1470
1471 trunk_request_enqueue(&treq_a, trunk, NULL, preq, NULL);
1472 trunk_request_enqueue(&treq_b, trunk, NULL, preq, NULL);
1473 trunk_request_enqueue(&treq_c, trunk, NULL, preq, NULL);
1474
1475 TEST_CASE("C1 connected, C2 inactive, R3 - Enqueued");
1478
1479 /*
1480 * Now mark the previous connection as
1481 * active. It should receive at least
1482 * one of the requests.
1483 */
1484 TEST_CASE("C2 active, R3 - Signal active, should balance");
1486
1489
1490 talloc_free(ctx);
1491 talloc_free(preq);
1492}
1493
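/*
 *	Allocate a trunk request plus a matching test preq, giving each preq an
 *	increasing priority so test_preq_cmp() orders them predictably.
 */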
1494#define ALLOC_REQ(_id) \
1495do { \
1496 treq_##_id = trunk_request_alloc(trunk, NULL); \
1497 preq_##_id = talloc_zero(ctx, test_proto_request_t); \
1498 preq_##_id->treq = treq_##_id; \
1499 preq_##_id->priority = next_prio++; \
1500} while (0)
1501
1502static void test_connection_levels_max(void)
1503{
1504 TALLOC_CTX *ctx = talloc_init_const("test");
1505 trunk_t *trunk;
1507 trunk_conf_t conf = {
1508 .start = 0, /* No connections on start */
1509 .min = 0,
1510 .max = 2,
1511 .max_req_per_conn = 2,
1512 .target_req_per_conn = 2, /* Two requests per connection */
1513 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
1514 };
1515 test_proto_request_t *preq_a, *preq_b, *preq_c, *preq_d, *preq_e;
1516 trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL, *treq_d = NULL, *treq_e = NULL;
1517 int next_prio = 0;
1518
1520
1521 el = fr_event_list_alloc(ctx, NULL, NULL);
1523
1524 /* Need to provide a timer starting value above zero */
1526
1527 trunk = test_setup_trunk(ctx, el, &conf, true, NULL);
1528 TRUNK_VERIFY(trunk);
1529
1530 /*
1531 * Queuing a request should start a connection.
1532 */
1533 TEST_CASE("C0, R1 - Enqueue should spawn");
1534 ALLOC_REQ(a);
1535 TEST_CHECK(trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1536 TRUNK_VERIFY(trunk);
1537
1538 /*
1539 * Like test_connection_start_on_enqueue(), you have to process the backlog
1540 * to start the chain of events.
1541 */
1545
1547 TRUNK_VERIFY(trunk);
1548
1549 /*
1550 * Queuing another request should *NOT* start another connection
1551 */
1552 TEST_CASE("C1 connecting, R2 - MUST NOT spawn");
1553 ALLOC_REQ(b);
1554 TEST_CHECK(trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1556 TRUNK_VERIFY(trunk);
1557
1558 TEST_CASE("C1 connecting, R3 - MUST NOT spawn");
1559 ALLOC_REQ(c);
1560 TEST_CHECK(trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1562 TRUNK_VERIFY(trunk);
1563
1564 TEST_CASE("C1 connecting, R4 - MUST NOT spawn");
1565 ALLOC_REQ(d);
1566 TEST_CHECK(trunk_request_enqueue(&treq_d, trunk, NULL, preq_d, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1568 TRUNK_VERIFY(trunk);
1569
1570 TEST_CASE("C1 connecting, R5 - MUST NOT spawn, NO CAPACITY");
1571 ALLOC_REQ(e);
1572 TEST_CHECK(trunk_request_enqueue(&treq_e, trunk, NULL, preq_e, NULL) == TRUNK_ENQUEUE_NO_CAPACITY);
1574 TRUNK_VERIFY(trunk);
1575
1576 /*
1577 * Allowing connection to open
1578 */
1581
1582 TEST_CASE("C1 active, R4 - Check pending 2");
1585 TRUNK_VERIFY(trunk);
1586
1587 /*
1588 * Sending requests
1589 */
1592
1593 TEST_CASE("C1 active, R4 - Check sent 2");
1595 TRUNK_VERIFY(trunk);
1596
1597 /*
1598 * Looping I/O
1599 */
1603
1604 /*
1605 * Receiving responses
1606 */
1610
1611 TEST_CHECK(preq_a->completed == true);
1612 TEST_CHECK(preq_a->failed == false);
1613 TEST_CHECK(preq_a->cancelled == false);
1614 TEST_CHECK(preq_a->freed == true);
1615
1616 TEST_CHECK(preq_b->completed == true);
1617 TEST_CHECK(preq_b->failed == false);
1618 TEST_CHECK(preq_b->cancelled == false);
1619 TEST_CHECK(preq_b->freed == true);
1620
1623 TRUNK_VERIFY(trunk);
1624
1625 TEST_CASE("C1 active, R0 - Check complete 2, pending 0");
1626
1627 /*
1628 * Sending requests
1629 */
1632
1633 /*
1634 * Looping I/O
1635 */
1638
1639 /*
1640 * Receiving responses
1641 */
1644
1645 TEST_CHECK(preq_c->completed == true);
1646 TEST_CHECK(preq_c->failed == false);
1647 TEST_CHECK(preq_c->cancelled == false);
1648 TEST_CHECK(preq_c->freed == true);
1649
1650 TEST_CHECK(preq_d->completed == true);
1651 TEST_CHECK(preq_d->failed == false);
1652 TEST_CHECK(preq_d->cancelled == false);
1653 TEST_CHECK(preq_d->freed == true);
1654
1656 TRUNK_VERIFY(trunk);
1657
1658 talloc_free(trunk);
1659 talloc_free(ctx);
1660}
1661
1662static void test_connection_levels_alternating_edges(void)
1663{
1664 TALLOC_CTX *ctx = talloc_init_const("test");
1665 trunk_t *trunk;
1667 trunk_conf_t conf = {
1668 .start = 0, /* No connections on start */
1669 .min = 0,
1670 .max = 0,
1671 .max_req_per_conn = 0,
1672 .target_req_per_conn = 2, /* Two requests per connection */
1673 .manage_interval = fr_time_delta_from_nsec(NSEC / 10)
1674 };
1675
1676 test_proto_request_t *preq_a, *preq_b, *preq_c;
1677 trunk_request_t *treq_a = NULL, *treq_b = NULL, *treq_c = NULL;
1678 test_proto_stats_t stats;
1679 int next_prio = 0;
1680
1682
1683 el = fr_event_list_alloc(ctx, NULL, NULL);
1685
1686 /* Need to provide a timer starting value above zero */
1688
1689 memset(&stats, 0, sizeof(stats));
1690 trunk = test_setup_trunk(ctx, el, &conf, true, &stats);
1691
1692 /*
1693 * Queuing a request should start a connection.
1694 */
1695 TEST_CASE("C0, R1 - Enqueue should spawn");
1696 ALLOC_REQ(a);
1697 TEST_CHECK(trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1698
1699 /*
1700 * Processing the event associated with the backlog creates
1701 * the connection in connecting state..
1702 */
1705
1707
1708 TEST_CASE("C1 connecting, R2 - MUST NOT spawn");
1709 ALLOC_REQ(b);
1710 TEST_CHECK(trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1713
1714 /*
1715 * Open connection
1716 */
1719
1721
1722 TEST_CASE("C1 connected, R3 - should spawn");
1723 ALLOC_REQ(c);
1724 TEST_CHECK(trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == TRUNK_ENQUEUE_OK);
1726
1729
1733
1734 /*
1735 * Complete requests
1736 */
1738
1741
1743
1744 TEST_CASE("C1 connected, C2 connecting, R2 - MUST NOT spawn");
1747
1748 /*
1749 * Finish the last request, should close one connection
1750 */
1753
1755
1756 TEST_CASE("C1 connected, R0");
1759
1760 /*
1761 * Requests now done, should close another connection
1762 */
1765
1767
1768 TEST_CASE("C0, R0");
1770
1771 TEST_CHECK(stats.completed == 3);
1772 TEST_CHECK(stats.failed == 0);
1773 TEST_CHECK(stats.cancelled == 0);
1774 TEST_CHECK(stats.freed == 3);
1775
1776 /*
1777 * Queuing a request should start a connection.
1778 */
1779 TEST_CASE("C0, R1 - Enqueue should spawn");
1780 ALLOC_REQ(a);
1781 TEST_CHECK(trunk_request_enqueue(&treq_a, trunk, NULL, preq_a, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1782
1783 /*
1784 * ...once the event associated with the backlogged request is handled.
1785 */
1788
1790
1791 TEST_CASE("C1 connecting, R2 - MUST NOT spawn");
1792 ALLOC_REQ(b);
1793 TEST_CHECK(trunk_request_enqueue(&treq_b, trunk, NULL, preq_b, NULL) == TRUNK_ENQUEUE_IN_BACKLOG);
1796
1797 /*
1798 * Open connection
1799 */
1802
1804
1805 TEST_CASE("C1 connected, R3 - should spawn");
1806 ALLOC_REQ(c);
1807 TEST_CHECK(trunk_request_enqueue(&treq_c, trunk, NULL, preq_c, NULL) == TRUNK_ENQUEUE_OK);
1809
1812
1816
1817 talloc_free(trunk);
1818 talloc_free(ctx);
1819}
1820
1821#undef fr_time /* Need the real time */
1822static void test_enqueue_and_io_speed(void)
1823{
1824 TALLOC_CTX *ctx = talloc_init_const("test");
1825 trunk_t *trunk;
1827 int events;
1828 trunk_conf_t conf = {
1829 .start = 1,
1830 .min = 1,
1831 .max = 0,
1832 .max_req_per_conn = 0,
1833 .target_req_per_conn = 0, /* No target requests per connection */
1834 .req_pool_headers = 1,
1835 .req_pool_size = sizeof(test_proto_request_t),
1836 .manage_interval = fr_time_delta_from_nsec(NSEC * 0.5)
1837 };
1838 size_t i = 0, requests = 100000;
1839 fr_time_t enqueue_start, enqueue_stop, io_start, io_stop;
1840 fr_time_delta_t enqueue_time, io_time, total_time;
1841 trunk_request_t **treq_array;
1842 test_proto_request_t **preq_array;
1843 test_proto_stats_t stats;
1844
1846
1847 el = fr_event_list_alloc(ctx, NULL, NULL);
1849
1850 /* Need to provide a timer starting value above zero */
1852
1853 memset(&stats, 0, sizeof(stats));
1854 trunk = test_setup_trunk(ctx, el, &conf, true, &stats);
1855
1856 /*
1857 * Open the connections
1858 */
1861
1862 /*
1863 * Build up a cache of requests
1864 * This prevents all mallocs on request enqueue.
1865 *
1866 * When the server's running, this does represent
1867 * close to what we'd have as a steady state.
1868 */
1869 MEM(treq_array = talloc_array(ctx, trunk_request_t *, requests));
1870 for (i = 0; i < requests; i++) treq_array[i] = trunk_request_alloc(trunk, NULL);
1871 for (i = 0; i < requests; i++) trunk_request_free(&treq_array[i]);
1872
1873 MEM(preq_array = talloc_array(ctx, test_proto_request_t *, requests));
1874
1876
1877 TEST_CASE("Enqueue requests");
1878 enqueue_start = fr_time();
1879// ProfilerStart(getenv("FR_PROFILE"));
1880 for (i = 0; i < requests; i++) {
1881 trunk_request_t *treq;
1882 test_proto_request_t *preq = NULL;
1883
1884 treq = trunk_request_alloc(trunk, NULL);
1885 preq = talloc_zero(treq, test_proto_request_t);
1886 preq->treq = treq;
1887 trunk_request_enqueue(&treq, trunk, NULL, preq, NULL);
1888 }
1889 enqueue_stop = fr_time();
1890 enqueue_time = fr_time_sub(enqueue_stop, enqueue_start);
1891 if (acutest_verbose_level_ >= 1) {
1892 INFO("Enqueue time %pV (%u rps) (%"PRIu64"/%"PRIu64")",
1893 fr_box_time_delta(enqueue_time),
1894 (uint32_t)(requests / ((float)(fr_time_delta_unwrap(enqueue_time)) / NSEC)),
1895 trunk->pub.req_alloc_new, trunk->pub.req_alloc_reused);
1896 }
1897
1898 TEST_CASE("Perform I/O operations");
1899 io_start = fr_time();
1900 while (true) {
1902 if (!events) break;
1905 }
1906 io_stop = fr_time();
1907 io_time = fr_time_sub(io_stop, io_start);
1908
1909 if (acutest_verbose_level_ >= 1) {
1910 INFO("I/O time %pV (%u rps)",
1911 fr_box_time_delta(io_time),
1912 (uint32_t)(requests / ((float)(fr_time_delta_unwrap(io_time)) / NSEC)));
1913 }
1914
1915 if (acutest_verbose_level_ >= 1) {
1916 total_time = fr_time_sub(io_stop, enqueue_start);
1917 INFO("Total time %pV (%u rps)",
1918 fr_box_time_delta(total_time),
1919 (uint32_t)(requests / ((float)(fr_time_delta_unwrap(total_time)) / NSEC)));
1920 }
1921
1922 TEST_CHECK_LEN(stats.completed, requests);
1923 TEST_CHECK_LEN(stats.failed, 0);
1924 TEST_CHECK_LEN(stats.cancelled, 0);
1925 TEST_CHECK_LEN(stats.freed, requests);
1926
1927// ProfilerStop();
1928
1929 talloc_free(ctx);
1930}
1931
1932/*
1933 * Connection spawning
1934 */
1935TEST_LIST = {
1936 /*
1937 * Basic tests
1938 */
1939 { "Basic - Alloc then free", test_socket_pair_alloc_then_free },
1940 { "Basic - Alloc then reconnect then free", test_socket_pair_alloc_then_reconnect_then_free },
1941
1942 /*
1943 * Connection timeout
1944 */
1945 { "Timeouts - Connection", test_socket_pair_alloc_then_connect_timeout },
1946 { "Timeouts - Reconnect delay", test_socket_pair_alloc_then_reconnect_check_delay },
1947
1948 /*
1949 * Basic enqueue/dequeue
1950 */
1951 { "Enqueue - Basic", test_enqueue_basic },
1952 { "Enqueue - Cancellation points", test_enqueue_cancellation_points },
1953 { "Enqueue - Partial state transitions", test_partial_to_complete_states },
1954 { "Requeue - On reconnect", test_requeue_on_reconnect },
1955
1956 /*
1957 * Rebalance
1958 */
1959 { "Rebalance - Connection rebalance", test_connection_rebalance_requests },
1960
1961 /*
1962 * Connection spawning tests
1963 */
1964 { "Spawn - Test connection start on enqueue", test_connection_start_on_enqueue },
1965 { "Spawn - Connection levels max", test_connection_levels_max },
1966 { "Spawn - Connection levels alternating edges",test_connection_levels_alternating_edges },
1967
1968 /*
1969 * Performance tests
1970 */
1971 { "Speed Test - Enqueue, and I/O", test_enqueue_and_io_speed },
1972 { NULL }
1973};