lst.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

/** Functions for a Leftmost Skeleton Tree
 *
 * @file src/lib/util/lst.c
 *
 * @copyright 2021 Network RADIUS SAS (legal@networkradius.com)
 */
RCSID("$Id: 0d1afa2f48e13d67958aa272eea1d49c20178b2a $")

#include <freeradius-devel/util/lst.h>
#include <freeradius-devel/util/misc.h>
#include <freeradius-devel/util/rand.h>
#include <freeradius-devel/util/strerror.h>

/*
 * Leftmost Skeleton Trees are defined in "Stronger Quickheaps" (Gonzalo Navarro,
 * Rodrigo Paredes, Patricio V. Poblete, and Peter Sanders), International Journal
 * of Foundations of Computer Science, November 2011. As the title suggests, it
 * is inspired by quickheaps, and indeed the underlying representation looks
 * like a quickheap.
 *
 * Heap/priority queue operations are defined in the paper in terms of LST
 * operations.
 */

/*
 * The LST as defined in the paper has a fixed size set at creation.
 * Here, as with quickheaps, we want to allow for expansion...
 * though given that, as the paper shows, the expected stack depth
 * is proportional to the log of the number of items in the LST, expanding
 * the pivot stack may be a rare event.
 */
#define INITIAL_CAPACITY	2048

#define is_power_of_2(_n)	((_n) && (((_n) & ((_n) - 1)) == 0))
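
/*
 * A worked example of the is_power_of_2() test (illustrative comment only,
 * not part of the original file): for _n = 2048 (0x800), _n - 1 = 0x7ff and
 * 0x800 & 0x7ff == 0, so the macro yields true; for _n = 2049 (0x801),
 * 0x801 & 0x800 == 0x800 != 0, so it yields false. Capacities are kept at
 * powers of two so index_reduce() below can mask rather than divide.
 */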

typedef unsigned int stack_index_t;

typedef struct {
	stack_index_t	depth;		//!< The current stack depth.
	unsigned int	size;		//!< The current stack size (number of frames)
	fr_lst_index_t	*data;		//!< Array of indices of the pivots (sometimes called roots)
} pivot_stack_t;

struct fr_lst_s {
	unsigned int	capacity;	//!< Number of elements that will fit
	fr_lst_index_t	idx;		//!< Starting index, initially zero
	unsigned int	num_elements;	//!< Number of elements in the LST
	size_t		offset;		//!< Offset of heap index in element structure.
	void		**p;		//!< Array of elements.
	pivot_stack_t	s;		//!< Stack of pivots, always with depth >= 1.
	fr_fast_rand_t	rand_ctx;	//!< Seed for random choices.
	char const	*type;		//!< Type of elements.
	fr_lst_cmp_t	cmp;		//!< Comparator function.
};

static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx) CC_HINT(always_inline, nonnull);
static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index) CC_HINT(always_inline, nonnull);

static inline CC_HINT(always_inline, nonnull) void *index_addr(fr_lst_t const *lst, void *data)
{
	return ((uint8_t *)data) + (lst)->offset;
}

/*
 * Concerning item_index() and item_index_set():
 * To let zero be the value *as stored in an item* that indicates not being in an LST,
 * we add one to the real index when storing it and subtract one when retrieving it.
 *
 * This lets the LST functions use item indices in [0, lst->capacity), important for
 * 1. the circular array, which allows an important optimization for fr_lst_pop()
 * 2. quick reduction of indices
 *
 * fr_lst_insert() needs to see the value actually stored, hence raw_item_index().
 */
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t raw_item_index(fr_lst_t const *lst, void *data)
{
	return *(fr_lst_index_t *)index_addr(lst, data);
}

static inline CC_HINT(always_inline, nonnull) fr_lst_index_t item_index(fr_lst_t const *lst, void *data)
{
	return raw_item_index(lst, data) - 1;
}

static inline CC_HINT(always_inline, nonnull) void item_index_set(fr_lst_t *lst, void *data, fr_lst_index_t idx)
{
	(*(fr_lst_index_t *)index_addr(lst, data)) = idx + 1;
}

static inline CC_HINT(always_inline, nonnull) fr_lst_index_t index_reduce(fr_lst_t const *lst, fr_lst_index_t idx)
{
	return idx & ((lst)->capacity - 1);
}

static inline CC_HINT(always_inline, nonnull)
bool is_equivalent(fr_lst_t const *lst, fr_lst_index_t idx1, fr_lst_index_t idx2)
{
	return (index_reduce(lst, idx1 - idx2) == 0);
}
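
/*
 * Worked example (illustrative only): with lst->capacity == 8 the mask is 7,
 * so index_reduce(lst, 10) == 10 & 7 == 2; the circular array wraps index 10
 * to slot 2. is_equivalent(lst, 10, 2) is then true, since (10 - 2) & 7 == 0.
 * Unsigned wraparound makes idx1 - idx2 safe even when idx1 < idx2.
 */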

static inline CC_HINT(always_inline, nonnull) void item_set(fr_lst_t *lst, fr_lst_index_t idx, void *data)
{
	lst->p[index_reduce(lst, idx)] = data;
}

static inline CC_HINT(always_inline, nonnull) void *item(fr_lst_t const *lst, fr_lst_index_t idx)
{
	return (lst->p[index_reduce(lst, idx)]);
}

static inline CC_HINT(always_inline, nonnull) void *pivot_item(fr_lst_t const *lst, stack_index_t idx)
{
	return item(lst, stack_item(&lst->s, idx));
}

/*
 * The paper defines the randomized priority queue operations in terms of the
 * sum type definition the authors use for LSTs. This code, however, deals with
 * the internal representation, including the root/pivot stack, which must change
 * as the LST changes. Also, an insertion or deletion may shift the position of
 * any number of buckets or change the number of buckets.
 *
 * So... for those operations, we will pass in the pointer to the LST, but
 * internally, we'll represent it and its subtrees with an (LST pointer, stack index)
 * pair. The index is that of the least pivot greater than or equal to all items in
 * the subtree (considering the "fictitious" pivot greater than anything), so (lst, 0)
 * represents the entire tree.
 *
 * The fictitious pivot at the bottom of the stack isn't actually in the array,
 * so don't try to refer to what's there.
 *
 * The index is visible for the size and length functions, since they need
 * to know the subtree they're working on.
 */
static inline CC_HINT(always_inline, nonnull) bool is_bucket(fr_lst_t const *lst, stack_index_t idx)
{
	return lst_length(lst, idx) == 1;
}

static inline CC_HINT(always_inline, nonnull) bool stack_expand(fr_lst_t *lst, pivot_stack_t *s)
{
	fr_lst_index_t	*n;
	unsigned int	n_size;

#ifndef NDEBUG
	/*
	 *	This likely can't happen, we just include
	 *	the guard to keep static analysis happy.
	 */
	if (unlikely(s->size > (UINT_MAX - s->size))) {
		if (s->size == UINT_MAX) {
			fr_strerror_const("lst stack is full");
			return false;
		} else {
			n_size = UINT_MAX;
		}
	} else {
#endif
		n_size = s->size * 2;
#ifndef NDEBUG
	}
#endif

	n = talloc_realloc(lst, s->data, fr_lst_index_t, n_size);
	if (unlikely(!n)) {
		fr_strerror_printf("Failed expanding lst stack to %u elements (%zu bytes)",
				   n_size, n_size * sizeof(fr_lst_index_t));
		return false;
	}

	s->size = n_size;
	s->data = n;
	return true;
}

static inline CC_HINT(always_inline, nonnull) int stack_push(fr_lst_t *lst, pivot_stack_t *s, fr_lst_index_t pivot)
{
	if (unlikely(s->depth == s->size && !stack_expand(lst, s))) return -1;

	s->data[s->depth++] = pivot;
	return 0;
}

static inline CC_HINT(always_inline, nonnull) void stack_pop(pivot_stack_t *s, unsigned int n)
{
	s->depth -= n;
}

static inline CC_HINT(always_inline, nonnull) stack_index_t stack_depth(pivot_stack_t const *s)
{
	return s->depth;
}

static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx)
{
	return s->data[idx];
}

static inline CC_HINT(always_inline, nonnull)
void stack_set(pivot_stack_t *s, stack_index_t idx, fr_lst_index_t new_value)
{
	s->data[idx] = new_value;
}

fr_lst_t *_fr_lst_alloc(TALLOC_CTX *ctx, fr_lst_cmp_t cmp, char const *type, size_t offset, fr_lst_index_t init)
{
	fr_lst_t	*lst;
	pivot_stack_t	*s;
	unsigned int	initial_stack_capacity;

	if (!init) {
		init = INITIAL_CAPACITY;
	} else if (!is_power_of_2(init)) {
		init = 1 << fr_high_bit_pos(init);
	}

	for (initial_stack_capacity = 1; (1U << initial_stack_capacity) < init; initial_stack_capacity++) ;

	/*
	 *	Pre-allocate stack memory as it is
	 *	unlikely to need to grow in practice.
	 *
	 *	We don't pre-allocate the array of elements;
	 *	if we pre-allocated the array of elements,
	 *	we'd end up wasting that memory as soon as
	 *	we needed to expand the array.
	 *
	 *	Pre-allocating three chunks appears to be
	 *	the optimum.
	 */
	lst = talloc_zero_pooled_object(ctx, fr_lst_t, 3, (initial_stack_capacity * sizeof(fr_lst_index_t)));
	if (unlikely(!lst)) return NULL;

	lst->capacity = init;
	lst->p = talloc_array(lst, void *, lst->capacity);
	if (unlikely(!lst->p)) {
	cleanup:
		talloc_free(lst);
		return NULL;
	}

	/*
	 *	Allocate the initial stack
	 */
	s = &lst->s;
	s->data = talloc_array(lst, fr_lst_index_t, initial_stack_capacity);
	if (unlikely(!s->data)) goto cleanup;
	s->depth = 0;
	s->size = initial_stack_capacity;

	/* Initially the LST is empty and we start at the beginning of the array */
	stack_push(lst, &lst->s, 0);

	lst->idx = 0;

	/* Prepare for random choices */
	lst->rand_ctx.a = fr_rand();
	lst->rand_ctx.b = fr_rand();

	lst->type = type;
	lst->cmp = cmp;
	lst->offset = offset;

	return lst;
}
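
/*
 * A minimal usage sketch (illustrative only, not part of the original file),
 * assuming a fr_lst_alloc() convenience macro in lst.h that wraps
 * _fr_lst_alloc() with the element type and the name of the fr_lst_index_t
 * member; job_t and job_cmp() are hypothetical:
 *
 *	typedef struct {
 *		fr_lst_index_t	idx;	// index slot the LST maintains
 *		int		prio;
 *	} job_t;
 *
 *	static int8_t job_cmp(void const *a, void const *b)
 *	{
 *		job_t const *ja = a, *jb = b;
 *		return (ja->prio > jb->prio) - (ja->prio < jb->prio);
 *	}
 *
 *	fr_lst_t *lst = fr_lst_alloc(NULL, job_cmp, job_t, idx, 0);
 */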

/** The length function for LSTs (how many buckets it contains)
 *
 */
static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index)
{
	return stack_depth(&lst->s) - stack_index;
}

/** The size function for LSTs (number of items a (sub)tree contains)
 *
 */
static CC_HINT(nonnull) fr_lst_index_t lst_size(fr_lst_t *lst, stack_index_t stack_index)
{
	fr_lst_index_t	reduced_right, reduced_idx;

	if (stack_index == 0) return lst->num_elements;

	reduced_right = index_reduce(lst, stack_item(&lst->s, stack_index));
	reduced_idx = index_reduce(lst, lst->idx);

	if (reduced_idx <= reduced_right) return reduced_right - reduced_idx;	/* No wraparound--easy. */

	return (lst->capacity - reduced_idx) + reduced_right;
}
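
/*
 * Worked example of the wraparound case (illustrative only): with capacity 8,
 * reduced_idx == 6 and reduced_right == 2, the subtree occupies slots 6, 7, 0, 1,
 * so its size is (8 - 6) + 2 == 4. Without wraparound (reduced_idx 1,
 * reduced_right 5) it's simply 5 - 1 == 4.
 */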

/** Flatten an LST, i.e. turn it into the base-case one-bucket [sub]tree
 *
 * NOTE: doing so leaves the passed stack_index valid--we just add
 * everything once in the left subtree to it.
 */
static inline CC_HINT(always_inline, nonnull) void lst_flatten(fr_lst_t *lst, stack_index_t stack_index)
{
	stack_pop(&lst->s, stack_depth(&lst->s) - stack_index);
}

/** Move data to a specific location in an LST's array.
 *
 * The caller must have made sure the location is available and exists
 * in said array.
 */
static inline CC_HINT(always_inline, nonnull) void lst_move(fr_lst_t *lst, fr_lst_index_t location, void *data)
{
	item_set(lst, location, data);
	item_index_set(lst, data, index_reduce(lst, location));
}

/** Add data to the bucket of a specified (sub)tree.
 *
 */
static void bucket_add(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
	fr_lst_index_t	new_space;
	stack_index_t	ridx;

	/*
	 *	For each bucket to the right, starting from the top,
	 *	make a space available at the top and move the bottom item
	 *	into it. Since ordering within a bucket doesn't matter, we
	 *	can do that, minimizing moving and index adjustment.
	 *
	 *	The fictitious pivot doesn't correspond to an actual value,
	 *	so we save pivot moving for the end of the loop.
	 */
	for (ridx = 0; ridx < stack_index; ridx++) {
		fr_lst_index_t	prev_pivot_index = stack_item(&lst->s, ridx + 1);
		bool		empty_bucket;

		new_space = stack_item(&lst->s, ridx);
		empty_bucket = (new_space - prev_pivot_index) == 1;
		stack_set(&lst->s, ridx, new_space + 1);

		if (!empty_bucket) lst_move(lst, new_space, item(lst, prev_pivot_index + 1));

		/* move the pivot up, leaving space for the next bucket */
		lst_move(lst, prev_pivot_index + 1, item(lst, prev_pivot_index));
	}

	/*
	 *	If the bucket isn't the leftmost, the above loop has made space
	 *	available where the pivot used to be.
	 *	If it is the leftmost, the loop wasn't executed, but the fictitious
	 *	pivot isn't there, which is just as good.
	 */
	new_space = stack_item(&lst->s, stack_index);
	stack_set(&lst->s, stack_index, new_space + 1);
	lst_move(lst, new_space, data);

	lst->num_elements++;
}
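
/*
 * Illustrative trace (not part of the original file): suppose the array holds
 * [ B1 | p1 | B0 ] with the fictitious pivot at stack index 0 and p1 at stack
 * index 1, and we add to B1 (stack_index 1). One loop pass opens a slot at the
 * old end, moves B0's bottom item into it, then moves p1 up one slot; finally
 * data lands where p1 used to sit, i.e. at the top of B1. Item order inside a
 * bucket never matters, which is what keeps this O(number of buckets to the
 * right) rather than O(number of items moved past).
 */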

/** Reduce pivot stack indices based on their difference from lst->idx, and then reduce lst->idx
 *
 */
static void lst_indices_reduce(fr_lst_t *lst)
{
	fr_lst_index_t	reduced_idx = index_reduce(lst, lst->idx);
	stack_index_t	depth = stack_depth(&lst->s), i;

	for (i = 0; i < depth; i++) stack_set(&lst->s, i, reduced_idx + stack_item(&lst->s, i) - lst->idx);

	lst->idx = reduced_idx;
}

/** Make more space available in an LST
 *
 * The LST paper only mentions this option in passing, pointing out that it's O(n); the only
 * constructor in the paper lets you hand it an array of items to initially insert
 * in the LST, so elements will have to be removed to make room for more (though it's
 * easy to see how one could specify extra space).
 *
 * Were it not for the circular array optimization, it would be talloc_realloc() and done;
 * it works or it doesn't. (That's still O(n), since it may require copying the data.)
 *
 * With the circular array optimization, if lst->idx refers to something other than the
 * beginning of the array, you have to move the elements preceding it to the beginning of the
 * newly-available space so it's still contiguous, and keep pivot stack entries consistent
 * with the positions of the elements.
 */
static bool lst_expand(fr_lst_t *lst)
{
	void		**n;
	unsigned int	old_capacity = lst->capacity, n_capacity;
	fr_lst_index_t	i;

	if (unlikely(old_capacity > (UINT_MAX - old_capacity))) {
		if (old_capacity == UINT_MAX) {
			fr_strerror_const("lst is full");
			return false;
		} else {
			n_capacity = UINT_MAX;
		}
	} else {
		n_capacity = old_capacity * 2;
	}

	n = talloc_realloc(lst, lst->p, void *, n_capacity);
	if (unlikely(!n)) {
		fr_strerror_printf("Failed expanding lst to %u elements (%zu bytes)",
				   n_capacity, n_capacity * sizeof(void *));
		return false;
	}

	lst->p = n;
	lst->capacity = n_capacity;

	lst_indices_reduce(lst);

	for (i = 0; i < lst->idx; i++) {
		void		*to_be_moved = item(lst, i);
		fr_lst_index_t	new_index = item_index(lst, to_be_moved) + old_capacity;

		lst_move(lst, new_index, to_be_moved);
	}

	return true;
}
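
/*
 * Worked example (illustrative only): capacity 8 doubling to 16, with
 * lst->idx == 3, so slots 0..2 hold items that logically follow slot 7.
 * After the realloc, each item at reduced index i < idx moves to
 * i + old_capacity (0 -> 8, 1 -> 9, 2 -> 10), restoring one contiguous
 * run 3..10 in the 16-slot array.
 */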

static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_lwb(fr_lst_t const *lst, stack_index_t stack_index)
{
	if (is_bucket(lst, stack_index)) return lst->idx;

	return stack_item(&lst->s, stack_index + 1) + 1;
}

/*
 * Note: buckets can be empty.
 */
static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_upb(fr_lst_t const *lst, stack_index_t stack_index)
{
	return stack_item(&lst->s, stack_index) - 1;
}

/*
 * Partition an LST.
 * It's only called for trees that are a single nonempty bucket;
 * if it's a subtree, it is thus necessarily the leftmost.
 */
static void partition(fr_lst_t *lst, stack_index_t stack_index)
{
	fr_lst_index_t	low = bucket_lwb(lst, stack_index);
	fr_lst_index_t	high = bucket_upb(lst, stack_index);
	fr_lst_index_t	l, h;
	fr_lst_index_t	pivot_index;
	void		*pivot;
	void		*temp;

	/*
	 *	Hoare partition doesn't do the trivial case, so catch it here.
	 */
	if (is_equivalent(lst, low, high)) {
		stack_push(lst, &lst->s, low);
		return;
	}

	pivot_index = low + (fr_fast_rand(&lst->rand_ctx) % (high + 1 - low));
	pivot = item(lst, pivot_index);

	if (pivot_index != low) {
		lst_move(lst, pivot_index, item(lst, low));
		lst_move(lst, low, pivot);
	}

	/*
	 *	Hoare partition; on the average, it does a third of the swaps of
	 *	Lomuto.
	 */
	l = low - 1;
	h = high + 1;
	for (;;) {
		while (lst->cmp(item(lst, --h), pivot) > 0) ;
		while (lst->cmp(item(lst, ++l), pivot) < 0) ;
		if (l >= h) break;
		temp = item(lst, l);
		lst_move(lst, l, item(lst, h));
		lst_move(lst, h, temp);
	}

	/*
	 *	Hoare partition doesn't guarantee the pivot sits at location h
	 *	the way Lomuto does and LST needs, so first get its location...
	 */
	pivot_index = item_index(lst, pivot);
	if (pivot_index >= index_reduce(lst, low)) {
		pivot_index = low + pivot_index - index_reduce(lst, low);
	} else {
		pivot_index = high - (index_reduce(lst, high) - pivot_index);
	}

	/*
	 *	...and then move it if need be.
	 */
	if (pivot_index < h) {
		lst_move(lst, pivot_index, item(lst, h));
		lst_move(lst, h, pivot);
	}
	if (pivot_index > h) {
		h++;
		lst_move(lst, pivot_index, item(lst, h));
		lst_move(lst, h, pivot);
	}

	stack_push(lst, &lst->s, h);
}
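
/*
 * Post-condition sketch (illustrative only): after partition(), items in
 * [low, h) compare <= the pivot, the pivot itself sits at h, and items in
 * (h, high] compare >= it; pushing h turns the single bucket into
 * tree(pivot, left subtree, right bucket) as the paper's ExtractMin/FindMin
 * expect. E.g. a bucket holding 5 2 7 1, with 5 chosen as pivot, might end
 * up as 2 1 | 5 | 7, with h pointing at the 5.
 */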

/*
 * Delete an item from a bucket in an LST
 */
static void bucket_delete(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
	fr_lst_index_t	location = item_index(lst, data);
	fr_lst_index_t	top;

	if (is_equivalent(lst, location, lst->idx)) {
		lst->idx++;
		if (is_equivalent(lst, lst->idx, 0)) lst_indices_reduce(lst);
	} else {
		for (;;) {
			top = bucket_upb(lst, stack_index);
			if (!is_equivalent(lst, location, top)) lst_move(lst, location, item(lst, top));
			stack_set(&lst->s, stack_index, top);
			if (stack_index == 0) break;
			lst_move(lst, top, item(lst, top + 1));
			stack_index--;
			location = top + 1;
		}
	}

	lst->num_elements--;
	item_index_set(lst, data, -1);
}

/*
 * We precede each function that does the real work with a Pythonish
 * (but colon-free) version of the pseudocode from the paper.
 *
 * clang, in version 13, will have a way to force tail call optimization
 * with a "musttail" attribute. gcc has -foptimize-sibling-calls, but
 * it works only with -O[23s]. For now, -O2 will assure TCO. In its absence,
 * the recursion depth is bounded by the number of pivot stack entries, aka
 * the "length" of the LST, which has an expected value proportional to
 * log(number of nodes).
 *
 * NOTE: inlining a recursive function is not advisable, so no
 * always_inline here.
 */

/*
 * ExtractMin(LST T) // assumes s(T) > 0
 *	If T = bucket(B) Then
 *		Partition(T) // O(|B|)
 *	Let T = tree(r, L, B′)
 *	If s(L) = 0 Then
 *		Flatten T into bucket(B′′) // O(1)
 *		Remove r from bucket B′′ // O(1)
 *		Return r
 *	Else
 *		Return ExtractMin(L)
 */
static inline CC_HINT(nonnull) void *_fr_lst_pop(fr_lst_t *lst, stack_index_t stack_index)
{
	if (is_bucket(lst, stack_index)) partition(lst, stack_index);
	++stack_index;
	if (lst_size(lst, stack_index) == 0) {
		void	*min = pivot_item(lst, stack_index);

		lst_flatten(lst, stack_index);
		bucket_delete(lst, stack_index, min);
		return min;
	}
	return _fr_lst_pop(lst, stack_index);
}

/*
 * FindMin(LST T) // assumes s(T) > 0
 *	If T = bucket(B) Then
 *		Partition(T) // O(|B|)
 *	Let T = tree(r, L, B′)
 *	If s(L) = 0 Then
 *		Return r
 *	Else
 *		Return FindMin(L)
 */
static inline CC_HINT(nonnull) void *_fr_lst_peek(fr_lst_t *lst, stack_index_t stack_index)
{
	if (is_bucket(lst, stack_index)) partition(lst, stack_index);
	++stack_index;
	if (lst_size(lst, stack_index) == 0) return pivot_item(lst, stack_index);
	return _fr_lst_peek(lst, stack_index);
}

/*
 * Delete(LST T, x ∈ Z)
 *	If T = bucket(B) Then
 *		Remove x from bucket B // O(depth)
 *	Else
 *		Let T = tree(r, L, B′)
 *		If x < r Then
 *			Delete(L, x)
 *		Else If x > r Then
 *			Remove x from bucket B′ // O(depth)
 *		Else
 *			Flatten T into bucket(B′′) // O(1)
 *			Remove x from bucket B′′ // O(depth)
 */
static inline CC_HINT(nonnull) void _fr_lst_extract(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
	int8_t	cmp;

	if (is_bucket(lst, stack_index)) {
		bucket_delete(lst, stack_index, data);
		return;
	}
	stack_index++;
	cmp = lst->cmp(data, pivot_item(lst, stack_index));
	if (cmp < 0) {
		_fr_lst_extract(lst, stack_index, data);
	} else if (cmp > 0) {
		bucket_delete(lst, stack_index - 1, data);
	} else {
		lst_flatten(lst, stack_index);
		bucket_delete(lst, stack_index, data);
	}
}

/*
 * Insert(LST T, x ∈ Z)
 *	If T = bucket(B) Then
 *		Add x to bucket B // O(depth)
 *	Else
 *		Let T = tree(r, L, B)
 *		If random(s(T) + 1) != 1 Then
 *			If x < r Then
 *				Insert(L, x)
 *			Else
 *				Add x to bucket B // O(depth)
 *		Else
 *			Flatten T into bucket(B′) // O(1)
 *			Add x to bucket B′ // O(depth)
 */
static inline CC_HINT(nonnull) void _fr_lst_insert(fr_lst_t *lst, stack_index_t stack_index, void *data)
{
#ifndef TALLOC_GET_TYPE_ABORT_NOOP
	if (lst->type) (void)_talloc_get_type_abort(data, lst->type, __location__);
#endif

	if (is_bucket(lst, stack_index)) {
		bucket_add(lst, stack_index, data);
		return;
	}
	stack_index++;
	if (fr_fast_rand(&lst->rand_ctx) % (lst_size(lst, stack_index) + 1) != 0) {
		if (lst->cmp(data, pivot_item(lst, stack_index)) < 0) {
			_fr_lst_insert(lst, stack_index, data);
		} else {
			bucket_add(lst, stack_index - 1, data);
		}
	} else {
		lst_flatten(lst, stack_index);
		bucket_add(lst, stack_index, data);
	}
}

/*
 * We represent a (sub)tree with an (lst, stack index) pair, so
 * fr_lst_pop(), fr_lst_peek(), and fr_lst_extract() are minimal
 * wrappers that
 *
 * (1) hide our representation from the user and preserve the interface
 * (2) check preconditions
 */

void *fr_lst_pop(fr_lst_t *lst)
{
	if (unlikely(lst->num_elements == 0)) return NULL;
	return _fr_lst_pop(lst, 0);
}

void *fr_lst_peek(fr_lst_t *lst)
{
	if (unlikely(lst->num_elements == 0)) return NULL;
	return _fr_lst_peek(lst, 0);
}

/** Remove an element from an LST
 *
 * @param[in] lst	the LST to remove an element from
 * @param[in] data	the element to remove
 * @return
 *	- 0 if removal succeeds
 *	- -1 if removal fails
 */
int fr_lst_extract(fr_lst_t *lst, void *data)
{
	if (unlikely(lst->num_elements == 0)) {
		fr_strerror_const("Tried to extract element from empty LST");
		return -1;
	}

	if (unlikely(raw_item_index(lst, data) == 0)) {
		fr_strerror_const("Tried to extract element not in LST");
		return -1;
	}

	_fr_lst_extract(lst, 0, data);
	return 0;
}

int fr_lst_insert(fr_lst_t *lst, void *data)
{
	/*
	 *	Expand if need be. Not in the paper, but we want the capability.
	 */
	if (unlikely((lst->num_elements == lst->capacity) && !lst_expand(lst))) return -1;

	/*
	 *	Don't insert something that looks like it's already in an LST.
	 */
	if (unlikely(raw_item_index(lst, data) > 0)) {
		fr_strerror_const("Node is already in the LST");
		return -1;
	}

	_fr_lst_insert(lst, 0, data);
	return 0;
}
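
/*
 * A minimal usage sketch (illustrative only), continuing the hypothetical
 * job_t example above: elements must have their index field zeroed before
 * the first insert, since a nonzero stored index means "already in an LST".
 *
 *	job_t *j = talloc_zero(NULL, job_t);
 *	j->prio = 7;
 *	if (fr_lst_insert(lst, j) < 0) handle_error();	// handle_error() is hypothetical
 *	job_t *min = fr_lst_pop(lst);			// j again, if it's the minimum
 */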

unsigned int fr_lst_num_elements(fr_lst_t *lst)
{
	return lst->num_elements;
}

/** Iterate over entries in LST
 *
 * @note If the LST is modified, the iterator should be considered invalidated.
 *
 * @param[in] lst	to iterate over.
 * @param[in] iter	Pointer to an iterator struct, used to maintain
 *			state between calls.
 * @return
 *	- User data.
 *	- NULL if at the end of the list.
 */
void *fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter)
{
	if (unlikely(lst->num_elements == 0)) return NULL;

	*iter = lst->idx;
	return item(lst, *iter);
}

/** Get the next entry in an LST
 *
 * @note If the LST is modified, the iterator should be considered invalidated.
 *
 * @param[in] lst	to iterate over.
 * @param[in] iter	Pointer to an iterator struct, used to maintain
 *			state between calls.
 * @return
 *	- User data.
 *	- NULL if at the end of the list.
 */
void *fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter)
{
	if ((*iter + 1) >= stack_item(&lst->s, 0)) return NULL;
	*iter += 1;

	return item(lst, *iter);
}
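
/*
 * Iteration sketch (illustrative only); the order is storage order, not
 * priority order, and the LST must not be modified while iterating:
 *
 *	fr_lst_iter_t	iter;
 *	void		*data;
 *
 *	for (data = fr_lst_iter_init(lst, &iter);
 *	     data;
 *	     data = fr_lst_iter_next(lst, &iter)) {
 *		process(data);	// process() is hypothetical
 *	}
 */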

#ifndef TALLOC_GET_TYPE_ABORT_NOOP
void fr_lst_verify(char const *file, int line, fr_lst_t const *lst)
{
	fr_lst_index_t	fake_pivot_index, reduced_fake_pivot_index, reduced_end;
	stack_index_t	depth = stack_depth(&(lst->s));
	int		bucket_size_sum;
	bool		pivots_in_order = true;
	bool		pivot_indices_in_order = true;

	fr_fatal_assert_msg(lst, "CONSISTENCY CHECK FAILED %s[%i]: LST pointer NULL", file, line);
	talloc_get_type_abort(lst, fr_lst_t);

	/*
	 *	There must be at least the fictitious pivot.
	 */
	fr_fatal_assert_msg(depth >= 1, "CONSISTENCY CHECK FAILED %s[%i]: LST pivot stack empty", file, line);

	/*
	 *	Modulo circularity, idx + the number of elements should be the index
	 *	of the fictitious pivot.
	 */
	fake_pivot_index = stack_item(&(lst->s), 0);
	reduced_fake_pivot_index = index_reduce(lst, fake_pivot_index);
	reduced_end = index_reduce(lst, lst->idx + lst->num_elements);
	fr_fatal_assert_msg(reduced_fake_pivot_index == reduced_end,
			    "CONSISTENCY CHECK FAILED %s[%i]: fictitious pivot doesn't point past last element",
			    file, line);

	/*
	 *	Bucket sizes must make sense.
	 */
	if (lst->num_elements) {
		bucket_size_sum = 0;

		for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
			fr_lst_index_t bucket_size = bucket_upb(lst, stack_index) - bucket_lwb(lst, stack_index) + 1;
			fr_fatal_assert_msg(bucket_size <= lst->num_elements,
					    "CONSISTENCY CHECK FAILED %s[%i]: bucket %u size %u is invalid",
					    file, line, stack_index, bucket_size);
			bucket_size_sum += bucket_size;
		}

		fr_fatal_assert_msg(bucket_size_sum + depth - 1 == lst->num_elements,
				    "CONSISTENCY CHECK FAILED %s[%i]: buckets inconsistent with number of elements",
				    file, line);
	}

	/*
	 *	No elements should be NULL;
	 *	they should have the correct index stored,
	 *	and if a type is specified, they should point at something of that type.
	 */
	for (fr_lst_index_t i = 0; i < lst->num_elements; i++) {
		void	*element = item(lst, lst->idx + i);

		fr_fatal_assert_msg(element, "CONSISTENCY CHECK FAILED %s[%i]: null element pointer at %u",
				    file, line, lst->idx + i);
		fr_fatal_assert_msg(is_equivalent(lst, lst->idx + i, item_index(lst, element)),
				    "CONSISTENCY CHECK FAILED %s[%i]: element %u index mismatch", file, line, i);
		if (lst->type) (void) _talloc_get_type_abort(element, lst->type, __location__);
	}

	/*
	 *	There's nothing more to check for a one-bucket tree.
	 */
	if (is_bucket(lst, 0)) return;

	/*
	 *	Otherwise, first, pivots from left to right (aside from the fictitious
	 *	one) should be in ascending order.
	 */
	for (stack_index_t stack_index = 1; stack_index + 1 < depth; stack_index++) {
		void	*current_pivot = pivot_item(lst, stack_index);
		void	*next_pivot = pivot_item(lst, stack_index + 1);

		if (current_pivot && next_pivot && lst->cmp(current_pivot, next_pivot) < 0) pivots_in_order = false;
	}
	fr_fatal_assert_msg(pivots_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivots not in ascending order",
			    file, line);

	/*
	 *	Next, the stacked pivot indices should decrease as you ascend from
	 *	the bottom of the pivot stack. Here we *do* include the fictitious
	 *	pivot; we're just comparing indices.
	 */
	for (stack_index_t stack_index = 0; stack_index + 1 < depth; stack_index++) {
		fr_lst_index_t current_pivot_index = stack_item(&(lst->s), stack_index);
		fr_lst_index_t previous_pivot_index = stack_item(&(lst->s), stack_index + 1);

		if (previous_pivot_index >= current_pivot_index) pivot_indices_in_order = false;
	}
	fr_fatal_assert_msg(pivot_indices_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivot indices not in order",
			    file, line);

	/*
	 *	Finally...
	 *	values in buckets shouldn't "follow" the pivot to the immediate right (if it exists)
	 *	and shouldn't "precede" the pivot to the immediate left (if it exists).
	 */
	for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
		fr_lst_index_t	lwb, upb, pivot_index;
		void		*pivot_item, *element;

		if (stack_index > 0) {
			lwb = (stack_index + 1 == depth) ? lst->idx : stack_item(&(lst->s), stack_index + 1);
			pivot_index = upb = stack_item(&(lst->s), stack_index);
			pivot_item = item(lst, pivot_index);
			for (fr_lst_index_t index = lwb; index < upb; index++) {
				element = item(lst, index);
				fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(element, pivot_item) <= 0,
						    "CONSISTENCY CHECK FAILED %s[%i]: element at %u > pivot at %u",
						    file, line, index, pivot_index);
			}
		}
		if (stack_index + 1 < depth) {
			upb = stack_item(&(lst->s), stack_index);
			lwb = pivot_index = stack_item(&(lst->s), stack_index + 1);
			pivot_item = item(lst, pivot_index);
			for (fr_lst_index_t index = lwb; index < upb; index++) {
				element = item(lst, index);
				fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(pivot_item, element) <= 0,
						    "CONSISTENCY CHECK FAILED %s[%i]: element at %u < pivot at %u",
						    file, line, index, pivot_index);
			}
		}
	}
}
#endif