The FreeRADIUS server $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
Loading...
Searching...
No Matches
lst.c
Go to the documentation of this file.
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
15 */
16
17/** Functions for a Leftmost Skeleton Tree
18 *
19 * @file src/lib/util/lst.c
20 *
21 * @copyright 2021 Network RADIUS SAS (legal@networkradius.com)
22 */
23RCSID("$Id: 1585f07f55589f3c217054332444b8118b880f9a $")
24
25#include <freeradius-devel/util/lst.h>
26#include <freeradius-devel/util/rand.h>
27#include <freeradius-devel/util/strerror.h>
28
29/*
30 * Leftmost Skeleton Trees are defined in "Stronger Quickheaps" (Gonzalo Navarro,
31 * Rodrigo Paredes, Patricio V. Poblete, and Peter Sanders) International Journal
32 * of Foundations of Computer Science, November 2011. As the title suggests, it
33 * is inspired by quickheaps, and indeed the underlying representation looks
34 * like a quickheap.
35 *
36 * heap/priority queue operations are defined in the paper in terms of LST
37 * operations.
38 */
39
/*
 *	The LST as defined in the paper has a fixed size set at creation.
 *	Here, as with quickheaps, but we want to allow for expansion...
 *	though given that, as the paper shows, the expected stack depth
 *	is proportion to the log of the number of items in the LST, expanding
 *	the pivot stack may be a rare event.
 */
#define INITIAL_CAPACITY	2048

/* True iff _n is a nonzero power of two (capacity invariant for index_reduce()). */
#define	is_power_of_2(_n)	((_n) && (((_n) & ((_n) - 1)) == 0))

/* Index into the pivot stack; distinct from fr_lst_index_t which indexes elements. */
typedef unsigned int stack_index_t;
52
53typedef struct {
54 stack_index_t depth; //!< The current stack depth.
55 unsigned int size; //!< The current stack size (number of frames)
56 fr_lst_index_t *data; //!< Array of indices of the pivots (sometimes called roots)
58
59struct fr_lst_s {
60 unsigned int capacity; //!< Number of elements that will fit
61 fr_lst_index_t idx; //!< Starting index, initially zero
62 unsigned int num_elements; //!< Number of elements in the LST
63 size_t offset; //!< Offset of heap index in element structure.
64 void **p; //!< Array of elements.
65 pivot_stack_t s; //!< Stack of pivots, always with depth >= 1.
66 fr_fast_rand_t rand_ctx; //!< Seed for random choices.
67 char const *type; //!< Type of elements.
68 fr_cmp_t cmp; //!< Comparator function.
69};
70
71static inline fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx) CC_HINT(always_inline, nonnull);
72static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index) CC_HINT(always_inline, nonnull);
73
74static inline CC_HINT(always_inline, nonnull) void *index_addr(fr_lst_t const *lst, void *data)
75{
76 return ((uint8_t *)data) + (lst)->offset;
77}
78
79/*
80 * Concerning item_index() and item_index_set():
81 * To let zero be the value *as stored in an item* that indicates not being in an LST,
82 * we add one to the real index when storing it and subtract one when retrieving it.
83 *
84 * This lets the LST functions use item indices in [0, lst->capacity), important for
85 * 1. the circular array, which allows an important optimization for fr_lst_pop()
86 * 2. quick reduction of indices
87 *
88 * fr_item_insert() needs to see the value actually stored, hence raw_item_index().
89 */
90static inline CC_HINT(always_inline, nonnull) fr_lst_index_t raw_item_index(fr_lst_t const *lst, void *data)
91{
92 return *(fr_lst_index_t *)index_addr(lst, data);
93}
94
95static inline CC_HINT(always_inline, nonnull) fr_lst_index_t item_index(fr_lst_t const *lst, void *data)
96{
97 return raw_item_index(lst, data) - 1;
98}
99
100static inline CC_HINT(always_inline, nonnull) void item_index_set(fr_lst_t *lst, void *data, fr_lst_index_t idx)
101{
102 (*(fr_lst_index_t *)index_addr(lst, data)) = idx + 1;
103}
104
105static inline CC_HINT(always_inline, nonnull) fr_lst_index_t index_reduce(fr_lst_t const *lst, fr_lst_index_t idx)
106{
107 return idx & ((lst)->capacity - 1);
108}
109
110static inline CC_HINT(always_inline, nonnull)
112{
113 return (index_reduce(lst, idx1 - idx2) == 0);
114}
115
116static inline CC_HINT(always_inline, nonnull) void item_set(fr_lst_t *lst, fr_lst_index_t idx, void *data)
117{
118 lst->p[index_reduce(lst, idx)] = data;
119}
120
121static inline CC_HINT(always_inline, nonnull) void *item(fr_lst_t const *lst, fr_lst_index_t idx)
122{
123 return (lst->p[index_reduce(lst, idx)]);
124}
125
126static inline CC_HINT(always_inline, nonnull) void *pivot_item(fr_lst_t const *lst, stack_index_t idx)
127{
128 return item(lst, stack_item(&lst->s, idx));
129}
130
131/*
132 * The paper defines randomized priority queue operations appropriately for the
133 * sum type definition the authors use for LSTs, which are used to implement the
134 * RPQ operations. This code, however, deals with the internal representation,
135 * including the root/pivot stack, which must change as the LST changes. Also, an
136 * insertion or deletion may shift the position of any number of buckets or change
137 * the number of buckets.
138 *
139 * So... for those operations, we will pass in the pointer to the LST, but
140 * internally, we'll represent it and its subtrees with an (LST pointer, stack index)
141 * pair. The index is that of the least pivot greater than or equal to all items in
142 * the subtree (considering the "fictitious" pivot greater than anything, so (lst, 0)
143 * represents the entire tree.
144 *
145 * The fictitious pivot at the bottom of the stack isn't actually in the array,
146 * so don't try to refer to what's there.
147 *
148 * The index is visible for the size and length functions, since they need
149 * to know the subtree they're working on.
150 */
151static inline CC_HINT(always_inline, nonnull) bool is_bucket(fr_lst_t const *lst, stack_index_t idx)
152{
153 return lst_length(lst, idx) == 1;
154}
155
157{
159 unsigned int n_size;
160
161#ifndef NDEBUG
162 /*
163 * This likely can't happen, we just include
164 * the guard to keep static analysis happy.
165 */
166 if (unlikely(s->size > (UINT_MAX - s->size))) {
167 if (s->size == UINT_MAX) {
168 fr_strerror_const("lst stack is full");
169 return false;
170 } else {
171 n_size = UINT_MAX;
172 }
173 } else {
174#endif
175 n_size = s->size * 2;
176#ifndef NDEBUG
177 }
178#endif
179
180 n = talloc_realloc(lst, s->data, fr_lst_index_t, n_size);
181 if (unlikely(!n)) {
182 fr_strerror_printf("Failed expanding lst stack to %u elements (%zu bytes)",
183 n_size, n_size * sizeof(fr_lst_index_t));
184 return false;
185 }
186
187 s->size = n_size;
188 s->data = n;
189 return true;
190}
191
192static inline CC_HINT(always_inline, nonnull) int stack_push(fr_lst_t *lst, pivot_stack_t *s, fr_lst_index_t pivot)
193{
194 if (unlikely(s->depth == s->size && !stack_expand(lst, s))) return -1;
195
196 s->data[s->depth++] = pivot;
197 return 0;
198}
199
200static inline CC_HINT(always_inline, nonnull) void stack_pop(pivot_stack_t *s, unsigned int n)
201{
202 s->depth -= n;
203}
204
205static inline CC_HINT(always_inline, nonnull) stack_index_t stack_depth(pivot_stack_t const *s)
206{
207 return s->depth;
208}
209
211{
212 return s->data[idx];
213}
214
215static inline CC_HINT(always_inline, nonnull)
217{
218 s->data[idx] = new_value;
219}
220
221fr_lst_t *_fr_lst_alloc(TALLOC_CTX *ctx, fr_cmp_t cmp, char const *type, size_t offset, fr_lst_index_t init)
222{
223 fr_lst_t *lst;
224 pivot_stack_t *s;
225 unsigned int initial_stack_capacity;
226
227 if (!init) {
229 } else if (!is_power_of_2(init)) {
230 init = 1 << fr_high_bit_pos(init);
231 }
232
233 for (initial_stack_capacity = 1; (1U << initial_stack_capacity) < init; initial_stack_capacity++) ;
234
235 /*
236 * Pre-allocate stack memory as it is
237 * unlikely to need to grow in practice.
238 *
239 * We don't pre-allocate the array of elements
240 * If we pre-allocated the array of elements
241 * we'd end up wasting that memory as soon as
242 * we needed to expand the array.
243 *
244 * Pre-allocating three chunks appears to be
245 * the optimum.
246 */
247 lst = talloc_zero_pooled_object(ctx, fr_lst_t, 3, (initial_stack_capacity * sizeof(fr_lst_index_t)));
248 if (unlikely(!lst)) return NULL;
249
250 lst->capacity = init;
251 lst->p = talloc_array(lst, void *, lst->capacity);
252 if (unlikely(!lst->p)) {
253 cleanup:
254 talloc_free(lst);
255 return NULL;
256 }
257
258 /*
259 * Allocate the initial stack
260 */
261 s = &lst->s;
262 s->data = talloc_array(lst, fr_lst_index_t, initial_stack_capacity);
263 if (unlikely(!s->data)) goto cleanup;
264 s->depth = 0;
265 s->size = initial_stack_capacity;
266
267 /* Initially the LST is empty and we start at the beginning of the array */
268 stack_push(lst, &lst->s, 0);
269
270 lst->idx = 0;
271
272 /* Prepare for random choices */
273 lst->rand_ctx.a = fr_rand();
274 lst->rand_ctx.b = fr_rand();
275
276 lst->type = type;
277 lst->cmp = cmp;
278 lst->offset = offset;
279
280 return lst;
281}
282
283/** The length function for LSTs (how many buckets it contains)
284 *
285 */
286static inline stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index)
287{
288 return stack_depth(&lst->s) - stack_index;
289}
290
291/** The size function for LSTs (number of items a (sub)tree contains)
292 *
293 */
294static CC_HINT(nonnull) fr_lst_index_t lst_size(fr_lst_t *lst, stack_index_t stack_index)
295{
296 fr_lst_index_t reduced_right, reduced_idx;
297
298 if (stack_index == 0) return lst->num_elements;
299
300 reduced_right = index_reduce(lst, stack_item(&lst->s, stack_index));
301 reduced_idx = index_reduce(lst, lst->idx);
302
303 if (reduced_idx <= reduced_right) return reduced_right - reduced_idx; /* No wraparound--easy. */
304
305 return (lst->capacity - reduced_idx) + reduced_right;
306}
307
308/** Flatten an LST, i.e. turn it into the base-case one bucket [sub]tree
309 *
310 * NOTE: so doing leaves the passed stack_index valid--we just add
311 * everything once in the left subtree to it.
312 */
313static inline CC_HINT(always_inline, nonnull) void lst_flatten(fr_lst_t *lst, stack_index_t stack_index)
314{
315 stack_pop(&lst->s, stack_depth(&lst->s) - stack_index);
316}
317
318/** Move data to a specific location in an LST's array.
319 *
320 * The caller must have made sure the location is available and exists
321 * in said array.
322 */
323static inline CC_HINT(always_inline, nonnull) void lst_move(fr_lst_t *lst, fr_lst_index_t location, void *data)
324{
325 item_set(lst, location, data);
326 item_index_set(lst, data, index_reduce(lst, location));
327}
328
329/** Add data to the bucket of a specified (sub)tree.
330 *
331 */
332static void bucket_add(fr_lst_t *lst, stack_index_t stack_index, void *data)
333{
334 fr_lst_index_t new_space;
335 stack_index_t ridx;
336
337 /*
338 * For each bucket to the right, starting from the top,
339 * make a space available at the top and move the bottom item
340 * into it. Since ordering within a bucket doesn't matter, we
341 * can do that, minimizing moving and index adjustment.
342 *
343 * The fictitious pivot doesn't correspond to an actual value,
344 * so we save pivot moving for the end of the loop.
345 */
346 for (ridx = 0; ridx < stack_index; ridx++) {
347 fr_lst_index_t prev_pivot_index = stack_item(&lst->s, ridx + 1);
348 bool empty_bucket;
349
350 new_space = stack_item(&lst->s, ridx);
351 empty_bucket = (new_space - prev_pivot_index) == 1;
352 stack_set(&lst->s, ridx, new_space + 1);
353
354 if (!empty_bucket) lst_move(lst, new_space, item(lst, prev_pivot_index + 1));
355
356 /* move the pivot up, leaving space for the next bucket */
357 lst_move(lst, prev_pivot_index + 1, item(lst, prev_pivot_index));
358 }
359
360 /*
361 * If the bucket isn't the leftmost, the above loop has made space
362 * available where the pivot used to be.
363 * If it is the leftmost, the loop wasn't executed, but the fictitious
364 * pivot isn't there, which is just as good.
365 */
366 new_space = stack_item(&lst->s, stack_index);
367 stack_set(&lst->s, stack_index, new_space + 1);
368 lst_move(lst, new_space, data);
369
370 lst->num_elements++;
371}
372
373/** Reduce pivot stack indices based on their difference from lst->idx, and then reduce lst->idx
374 *
375 */
377{
378 fr_lst_index_t reduced_idx = index_reduce(lst, lst->idx);
379 stack_index_t depth = stack_depth(&lst->s), i;
380
381 for (i = 0; i < depth; i++) stack_set(&lst->s, i, reduced_idx + stack_item(&lst->s, i) - lst->idx);
382
383 lst->idx = reduced_idx;
384}
385
386/** Make more space available in an LST
387 *
388 * The LST paper only mentions this option in passing, pointing out that it's O(n); the only
389 * constructor in the paper lets you hand it an array of items to initially insert
390 * in the LST, so elements will have to be removed to make room for more (though it's
391 * easy to see how one could specify extra space).
392 *
393 * Were it not for the circular array optimization, it would be talloc_realloc() and done;
394 * it works or it doesn't. (That's still O(n), since it may require copying the data.)
395 *
396 * With the circular array optimization, if lst->idx refers to something other than the
397 * beginning of the array, you have to move the elements preceding it to beginning of the
398 * newly-available space so it's still contiguous, and keep pivot stack entries consistent
399 * with the positions of the elements.
400 */
401static bool lst_expand(fr_lst_t *lst)
402{
403 void **n;
404 unsigned int old_capacity = lst->capacity, n_capacity;
406
407 if (unlikely(old_capacity > (UINT_MAX - old_capacity))) {
408 if (old_capacity == UINT_MAX) {
409 fr_strerror_const("lst is full");
410 return false;
411 } else {
412 n_capacity = UINT_MAX;
413 }
414 } else {
415 n_capacity = old_capacity * 2;
416 }
417
418 n = talloc_realloc(lst, lst->p, void *, n_capacity);
419 if (unlikely(!n)) {
420 fr_strerror_printf("Failed expanding lst to %u elements (%zu bytes)",
421 n_capacity, n_capacity * sizeof(void *));
422 return false;
423 }
424
425 lst->p = n;
426 lst->capacity = n_capacity;
427
429
430 for (i = 0; i < lst->idx; i++) {
431 void *to_be_moved = item(lst, i);
432 fr_lst_index_t new_index = item_index(lst, to_be_moved) + old_capacity;
433
434 lst_move(lst, new_index, to_be_moved);
435 }
436
437 return true;
438}
439
440static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_lwb(fr_lst_t const *lst, stack_index_t stack_index)
441{
442 if (is_bucket(lst, stack_index)) return lst->idx;
443
444 return stack_item(&lst->s, stack_index + 1) + 1;
445}
446
447/*
448 * Note: buckets can be empty,
449 */
450static inline CC_HINT(always_inline, nonnull) fr_lst_index_t bucket_upb(fr_lst_t const *lst, stack_index_t stack_index)
451{
452 return stack_item(&lst->s, stack_index) - 1;
453}
454
455/*
456 * Partition an LST
457 * It's only called for trees that are a single nonempty bucket;
458 * if it's a subtree, it is thus necessarily the leftmost.
459 */
460static void partition(fr_lst_t *lst, stack_index_t stack_index)
461{
462 fr_lst_index_t low = bucket_lwb(lst, stack_index);
463 fr_lst_index_t high = bucket_upb(lst, stack_index);
464 fr_lst_index_t l, h;
465 fr_lst_index_t pivot_index;
466 void *pivot;
467 void *temp;
468
469 /*
470 * Hoare partition doesn't do the trivial case, so catch it here.
471 */
472 if (is_equivalent(lst, low, high)) {
473 stack_push(lst, &lst->s, low);
474 return;
475 }
476
477 pivot_index = low + (fr_fast_rand(&lst->rand_ctx) % (high + 1 - low));
478 pivot = item(lst, pivot_index);
479
480 if (pivot_index != low) {
481 lst_move(lst, pivot_index, item(lst, low));
482 lst_move(lst, low, pivot);
483 }
484
485 /*
486 * Hoare partition; on the avaerage, it does a third the swaps of
487 * Lomuto.
488 */
489 l = low - 1;
490 h = high + 1;
491 for (;;) {
492 while (lst->cmp(item(lst, --h), pivot) > 0) ;
493 while (lst->cmp(item(lst, ++l), pivot) < 0) ;
494 if (l >= h) break;
495 temp = item(lst, l);
496 lst_move(lst, l, item(lst, h));
497 lst_move(lst, h, temp);
498 }
499
500 /*
501 * Hoare partition doesn't guarantee the pivot sits at location h
502 * the way Lomuto does and LST needs, so first get its location...
503 */
504 pivot_index = item_index(lst, pivot);
505 if (pivot_index >= index_reduce(lst, low)) {
506 pivot_index = low + pivot_index - index_reduce(lst, low);
507 } else {
508 pivot_index = high - (index_reduce(lst, high) - pivot_index);
509 }
510
511 /*
512 * ...and then move it if need be.
513 */
514 if (pivot_index < h) {
515 lst_move(lst, pivot_index, item(lst, h));
516 lst_move(lst, h, pivot);
517 }
518 if (pivot_index > h) {
519 h++;
520 lst_move(lst, pivot_index, item(lst, h));
521 lst_move(lst, h, pivot);
522 }
523
524 stack_push(lst, &lst->s, h);
525}
526
527/*
528 * Delete an item from a bucket in an LST
529 */
530static void bucket_delete(fr_lst_t *lst, stack_index_t stack_index, void *data)
531{
532 fr_lst_index_t location = item_index(lst, data);
533 fr_lst_index_t top;
534
535 if (is_equivalent(lst, location, lst->idx)) {
536 lst->idx++;
537 if (is_equivalent(lst, lst->idx, 0)) lst_indices_reduce(lst);
538 } else {
539 for (;;) {
540 top = bucket_upb(lst, stack_index);
541 if (!is_equivalent(lst, location, top)) lst_move(lst, location, item(lst, top));
542 stack_set(&lst->s, stack_index, top);
543 if (stack_index == 0) break;
544 lst_move(lst, top, item(lst, top + 1));
545 stack_index--;
546 location = top + 1;
547 }
548 }
549
550 lst->num_elements--;
551 item_index_set(lst, data, -1);
552}
553
554/*
555 * We precede each function that does the real work with a Pythonish
556 * (but colon-free) version of the pseudocode from the paper.
557 *
558 * clang, in version 13, will have a way to force tail call optimization
559 * with a "musttail" attribute. gcc has -f-foptimize-sibling-calls, but
560 * it works only with -O[23s]. For now, -O2 will assure TCO. In its absence,
561 * the recursion depth is bounded by the number of pivot stack entries, aka
562 * the "length" of the LST, which has an expected value proportional to
563 * log(number of nodes).
564 *
565 * NOTE: inlining a recursive function is not advisable, so no
566 * always_inline here.
567 */
568
569/*
570 * ExtractMin(LST T ) // assumes s(T ) > 0
571 * If T = bucket(B) Then
572 * Partition(T ) // O(|B|)
573 * Let T = tree(r, L, B )
574 * If s(L) = 0 Then
575 * Flatten T into bucket(B ) // O(1)
576 * Remove r from bucket B // O(1)
577 * Return r
578 * Else
579 * Return ExtractMin(L)
580 */
581static inline CC_HINT(nonnull) void *_fr_lst_pop(fr_lst_t *lst, stack_index_t stack_index)
582{
583 if (is_bucket(lst, stack_index)) partition(lst, stack_index);
584 ++stack_index;
585 if (lst_size(lst, stack_index) == 0) {
586 void *min = pivot_item(lst, stack_index);
587
588 lst_flatten(lst, stack_index);
589 bucket_delete(lst, stack_index, min);
590 return min;
591 }
592 return _fr_lst_pop(lst, stack_index);
593}
594
595/*
596 * FindMin(LST T ) // assumes s(T ) > 0
597 * If T = bucket(B) Then
598 * Partition(T ) // O(|B|)
599 * Let T = tree(r, L, B )
600 * If s(L) = 0 Then
601 * Return r
602 * Else
603 * Return FindMin(L)
604 */
605static inline CC_HINT(nonnull) void *_fr_lst_peek(fr_lst_t *lst, stack_index_t stack_index)
606{
607 if (is_bucket(lst, stack_index)) partition(lst, stack_index);
608 ++stack_index;
609 if (lst_size(lst, stack_index) == 0) return pivot_item(lst, stack_index);
610 return _fr_lst_peek(lst, stack_index);
611}
612
613/*
614 * Delete(LST T, x ∈ Z)
615 * If T = bucket(B) Then
616 * Remove x from bucket B // O(depth)
617 * Else
618 * Let T = tree(r, L, B′)
619 * If x < r Then
620 * Delete(L, x)
621 * Else If x > r Then
622 * Remove x from bucket B ′ // O(depth)
623 * Else
624 * Flatten T into bucket(B′′) // O(1)
625 * Remove x from bucket B′′ // O(depth)
626 */
627static inline CC_HINT(nonnull) void _fr_lst_extract(fr_lst_t *lst, stack_index_t stack_index, void *data)
628{
629 int8_t cmp;
630
631 if (is_bucket(lst, stack_index)) {
632 bucket_delete(lst, stack_index, data);
633 return;
634 }
635 stack_index++;
636 cmp = lst->cmp(data, pivot_item(lst, stack_index));
637 if (cmp < 0) {
638 _fr_lst_extract(lst, stack_index, data);
639 } else if (cmp > 0) {
640 bucket_delete(lst, stack_index - 1, data);
641 } else {
642 lst_flatten(lst, stack_index);
643 bucket_delete(lst, stack_index, data);
644 }
645}
646
647/*
648 * Insert(LST T, x ∈ Z)
649 * If T = bucket(B) Then
650 * Add x to bucket B // O(depth)
651 * Else
652 * Let T = tree(r, L, B)
653 * If random(s(T) + 1) != 1 Then
654 * If x < r Then
655 * Insert(L, x)
656 * Else
657 * Add x to bucket B // O(depth)
658 * Else
659 * Flatten T into bucket(B′) // O(1)
660 * Add x to bucket B′ // O(depth)
661 */
662static inline CC_HINT(nonnull) void _fr_lst_insert(fr_lst_t *lst, stack_index_t stack_index, void *data)
663{
664#ifndef TALLOC_GET_TYPE_ABORT_NOOP
665 if (lst->type) (void)_talloc_get_type_abort(data, lst->type, __location__);
666#endif
667
668 if (is_bucket(lst, stack_index)) {
669 bucket_add(lst, stack_index, data);
670 return;
671 }
672 stack_index++;
673 if (fr_fast_rand(&lst->rand_ctx) % (lst_size(lst, stack_index) + 1) != 0) {
674 if (lst->cmp(data, pivot_item(lst, stack_index)) < 0) {
675 _fr_lst_insert(lst, stack_index, data);
676 } else {
677 bucket_add(lst, stack_index - 1, data);
678 }
679 } else {
680 lst_flatten(lst, stack_index);
681 bucket_add(lst, stack_index, data);
682 }
683}
684
685/*
686 * We represent a (sub)tree with an (lst, stack index) pair, so
687 * fr_lst_pop(), fr_lst_peek(), and fr_lst_extract() are minimal
688 * wrappers that
689 *
690 * (1) hide our representation from the user and preserve the interface
691 * (2) check preconditions
692 */
693
695{
696 if (unlikely(lst->num_elements == 0)) return NULL;
697 return _fr_lst_pop(lst, 0);
698}
699
701{
702 if (unlikely(lst->num_elements == 0)) return NULL;
703 return _fr_lst_peek(lst, 0);
704}
705
706/** Remove an element from an LST
707 *
708 * @param[in] lst the LST to remove an element from
709 * @param[in] data the element to remove
710 * @return
711 * - 0 if removal succeeds
712 * - -1 if removal fails
713 */
715{
716 if (unlikely(lst->num_elements == 0)) {
717 fr_strerror_const("Tried to extract element from empty LST");
718 return -1;
719 }
720
721 if (unlikely(raw_item_index(lst, data) == 0)) {
722 fr_strerror_const("Tried to extract element not in LST");
723 return -1;
724 }
725
726 _fr_lst_extract(lst, 0, data);
727 return 0;
728}
729
731{
732 /*
733 * Expand if need be. Not in the paper, but we want the capability.
734 */
735 if (unlikely((lst->num_elements == lst->capacity) && !lst_expand(lst))) return -1;
736
737 /*
738 * Don't insert something that looks like it's already in an LST.
739 */
740 if (unlikely(raw_item_index(lst, data) > 0)) {
741 fr_strerror_const("Node is already in the LST");
742 return -1;
743 }
744
745 _fr_lst_insert(lst, 0, data);
746 return 0;
747}
748
750{
751 return lst->num_elements;
752}
753
754/** Iterate over entries in LST
755 *
756 * @note If the LST is modified, the iterator should be considered invalidated.
757 *
758 * @param[in] lst to iterate over.
759 * @param[in] iter Pointer to an iterator struct, used to maintain
760 * state between calls.
761 * @return
762 * - User data.
763 * - NULL if at the end of the list.
764 */
766{
767 if (unlikely(lst->num_elements == 0)) return NULL;
768
769 *iter = lst->idx;
770 return item(lst, *iter);
771}
772
773/** Get the next entry in an LST
774 *
775 * @note If the LST is modified, the iterator should be considered invalidated.
776 *
777 * @param[in] lst to iterate over.
778 * @param[in] iter Pointer to an iterator struct, used to maintain
779 * state between calls.
780 * @return
781 * - User data.
782 * - NULL if at the end of the list.
783 */
785{
786 if ((*iter + 1) >= stack_item(&lst->s, 0)) return NULL;
787 *iter += 1;
788
789 return item(lst, *iter);
790}
791
792#ifndef TALLOC_GET_TYPE_ABORT_NOOP
793void fr_lst_verify(char const *file, int line, fr_lst_t const *lst)
794{
795 fr_lst_index_t fake_pivot_index, reduced_fake_pivot_index, reduced_end;
796 stack_index_t depth = stack_depth(&(lst->s));
797 int bucket_size_sum;
798 bool pivots_in_order = true;
799 bool pivot_indices_in_order = true;
800
801 fr_fatal_assert_msg(lst, "CONSISTENCY CHECK FAILED %s[%i]: LST pointer NULL", file, line);
802 talloc_get_type_abort(lst, fr_lst_t);
803
804 /*
805 * There must be at least the fictitious pivot.
806 */
807 fr_fatal_assert_msg(depth >= 1, "CONSISTENCY CHECK FAILED %s[%i]: LST pivot stack empty", file, line);
808
809 /*
810 * Modulo circularity, idx + the number of elements should be the index
811 * of the fictitious pivot.
812 */
813 fake_pivot_index = stack_item(&(lst->s), 0);
814 reduced_fake_pivot_index = index_reduce(lst, fake_pivot_index);
815 reduced_end = index_reduce(lst, lst->idx + lst->num_elements);
816 fr_fatal_assert_msg(reduced_fake_pivot_index == reduced_end,
817 "CONSISTENCY CHECK FAILED %s[%i]: fictitious pivot doesn't point past last element",
818 file, line);
819
820 /*
821 * Bucket sizes must make sense.
822 */
823 if (lst->num_elements) {
824 bucket_size_sum = 0;
825
826 for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
827 fr_lst_index_t bucket_size = bucket_upb(lst, stack_index) - bucket_lwb(lst, stack_index) + 1;
828 fr_fatal_assert_msg(bucket_size <= lst->num_elements,
829 "CONSISTENCY CHECK FAILED %s[%i]: bucket %u size %u is invalid",
830 file, line, stack_index, bucket_size);
831 bucket_size_sum += bucket_size;
832 }
833
834 fr_fatal_assert_msg(bucket_size_sum + depth - 1 == lst->num_elements,
835 "CONSISTENCY CHECK FAILED %s[%i]: buckets inconsistent with number of elements",
836 file, line);
837 }
838
839 /*
840 * No elements should be NULL;
841 * they should have the correct index stored,
842 * and if a type is specified, they should point at something of that type,
843 */
844 for (fr_lst_index_t i = 0; i < lst->num_elements; i++) {
845 void *element = item(lst, lst->idx + i);
846
847 fr_fatal_assert_msg(element, "CONSISTENCY CHECK FAILED %s[%i]: null element pointer at %u",
848 file, line, lst->idx + i);
849 fr_fatal_assert_msg(is_equivalent(lst, lst->idx + i, item_index(lst, element)),
850 "CONSISTENCY CHECK FAILED %s[%i]: element %u index mismatch", file, line, i);
851 if (lst->type) (void) _talloc_get_type_abort(element, lst->type, __location__);
852 }
853
854 /*
855 * There's nothing more to check for a one-bucket tree.
856 */
857 if (is_bucket(lst, 0)) return;
858
859 /*
860 * Otherwise, first, pivots from left to right (aside from the fictitious
861 * one) should be in ascending order.
862 */
863 for (stack_index_t stack_index = 1; stack_index + 1 < depth; stack_index++) {
864 void *current_pivot = pivot_item(lst, stack_index);
865 void *next_pivot = pivot_item(lst, stack_index + 1);
866
867 if (current_pivot && next_pivot && lst->cmp(current_pivot, next_pivot) < 0) pivots_in_order = false;
868 }
869 fr_fatal_assert_msg(pivots_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivots not in ascending order",
870 file, line);
871
872 /*
873 * Next, the stacked pivot indices should decrease as you ascend from
874 * the bottom of the pivot stack. Here we *do* include the fictitious
875 * pivot; we're just comparing indices.
876 */
877 for (stack_index_t stack_index = 0; stack_index + 1 < depth; stack_index++) {
878 fr_lst_index_t current_pivot_index = stack_item(&(lst->s), stack_index);
879 fr_lst_index_t previous_pivot_index = stack_item(&(lst->s), stack_index + 1);
880
881 if (previous_pivot_index >= current_pivot_index) pivot_indices_in_order = false;
882 }
883 fr_fatal_assert_msg(pivot_indices_in_order, "CONSISTENCY CHECK FAILED %s[%i]: pivots indices not in order",
884 file, line);
885
886 /*
887 * Finally...
888 * values in buckets shouldn't "follow" the pivot to the immediate right (if it exists)
889 * and shouldn't "precede" the pivot to the immediate left (if it exists)
890 */
891 for (stack_index_t stack_index = 0; stack_index < depth; stack_index++) {
892 fr_lst_index_t lwb, upb, pivot_index;
893 void *pivot_item, *element;
894
895 if (stack_index > 0) {
896 lwb = (stack_index + 1 == depth) ? lst->idx : stack_item(&(lst->s), stack_index + 1);
897 pivot_index = upb = stack_item(&(lst->s), stack_index);
898 pivot_item = item(lst, pivot_index);
899 for (fr_lst_index_t index = lwb; index < upb; index++) {
900 element = item(lst, index);
901 fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(element, pivot_item) <= 0,
902 "CONSISTENCY CHECK FAILED %s[%i]: element at %u > pivot at %u",
903 file, line, index, pivot_index);
904 }
905 }
906 if (stack_index + 1 < depth) {
907 upb = stack_item(&(lst->s), stack_index);
908 lwb = pivot_index = stack_item(&(lst->s), stack_index + 1);
909 pivot_item = item(lst, pivot_index);
910 for (fr_lst_index_t index = lwb; index < upb; index++) {
911 element = item(lst, index);
912 fr_fatal_assert_msg(!element || !pivot_item || lst->cmp(pivot_item, element) <= 0,
913 "CONSISTENCY CHECK FAILED %s[%i]: element at %u < pivot at %u",
914 file, line, index, pivot_index);
915 }
916 }
917 }
918}
919#endif
int const char * file
Definition acutest.h:702
int n
Definition acutest.h:577
int const char int line
Definition acutest.h:702
static bool init
Definition fuzzer.c:40
#define RCSID(id)
Definition build.h:488
#define unlikely(_x)
Definition build.h:384
static size_t min(size_t x, size_t y)
Definition dbuff.c:66
#define fr_fatal_assert_msg(_x, _fmt,...)
Calls panic_action ifndef NDEBUG, else logs error and causes the server to exit immediately with code...
Definition debug.h:186
talloc_free(hp)
static int stack_depth
Definition radmin.c:156
void * fr_lst_iter_next(fr_lst_t *lst, fr_lst_iter_t *iter)
Get the next entry in an LST.
Definition lst.c:784
int fr_lst_extract(fr_lst_t *lst, void *data)
Remove an element from an LST.
Definition lst.c:714
stack_index_t depth
The current stack depth.
Definition lst.c:54
static fr_lst_index_t stack_item(pivot_stack_t const *s, stack_index_t idx)
Definition lst.c:210
static void lst_indices_reduce(fr_lst_t *lst)
Reduce pivot stack indices based on their difference from lst->idx, and then reduce lst->idx.
Definition lst.c:376
void * fr_lst_iter_init(fr_lst_t *lst, fr_lst_iter_t *iter)
Iterate over entries in LST.
Definition lst.c:765
static void * item(fr_lst_t const *lst, fr_lst_index_t idx)
Definition lst.c:121
void fr_lst_verify(char const *file, int line, fr_lst_t const *lst)
Definition lst.c:793
void * fr_lst_pop(fr_lst_t *lst)
Definition lst.c:694
static void lst_flatten(fr_lst_t *lst, stack_index_t stack_index)
Flatten an LST, i.e.
Definition lst.c:313
#define is_power_of_2(_n)
Definition lst.c:49
static void * pivot_item(fr_lst_t const *lst, stack_index_t idx)
Definition lst.c:126
static void item_set(fr_lst_t *lst, fr_lst_index_t idx, void *data)
Definition lst.c:116
static fr_lst_index_t lst_size(fr_lst_t *lst, stack_index_t stack_index)
The size function for LSTs (number of items a (sub)tree contains)
Definition lst.c:294
static void partition(fr_lst_t *lst, stack_index_t stack_index)
Definition lst.c:460
unsigned int capacity
Number of elements that will fit.
Definition lst.c:60
static bool is_equivalent(fr_lst_t const *lst, fr_lst_index_t idx1, fr_lst_index_t idx2)
Definition lst.c:111
#define INITIAL_CAPACITY
Definition lst.c:47
char const * type
Type of elements.
Definition lst.c:67
pivot_stack_t s
Stack of pivots, always with depth >= 1.
Definition lst.c:65
static fr_lst_index_t index_reduce(fr_lst_t const *lst, fr_lst_index_t idx)
Definition lst.c:105
static void * index_addr(fr_lst_t const *lst, void *data)
Definition lst.c:74
static bool lst_expand(fr_lst_t *lst)
Make more space available in an LST.
Definition lst.c:401
static stack_index_t lst_length(fr_lst_t const *lst, stack_index_t stack_index)
The length function for LSTs (how many buckets it contains)
Definition lst.c:286
static void bucket_delete(fr_lst_t *lst, stack_index_t stack_index, void *data)
Definition lst.c:530
static void stack_set(pivot_stack_t *s, stack_index_t idx, fr_lst_index_t new_value)
Definition lst.c:216
fr_lst_index_t idx
Starting index, initially zero.
Definition lst.c:61
int fr_lst_insert(fr_lst_t *lst, void *data)
Definition lst.c:730
unsigned int fr_lst_num_elements(fr_lst_t *lst)
Definition lst.c:749
static void stack_pop(pivot_stack_t *s, unsigned int n)
Definition lst.c:200
static void bucket_add(fr_lst_t *lst, stack_index_t stack_index, void *data)
Add data to the bucket of a specified (sub)tree.
Definition lst.c:332
static void _fr_lst_extract(fr_lst_t *lst, stack_index_t stack_index, void *data)
Definition lst.c:627
static fr_lst_index_t raw_item_index(fr_lst_t const *lst, void *data)
Definition lst.c:90
static void * _fr_lst_pop(fr_lst_t *lst, stack_index_t stack_index)
Definition lst.c:581
void * fr_lst_peek(fr_lst_t *lst)
Definition lst.c:700
unsigned int size
The current stack size (number of frames)
Definition lst.c:55
static void * _fr_lst_peek(fr_lst_t *lst, stack_index_t stack_index)
Definition lst.c:605
static void _fr_lst_insert(fr_lst_t *lst, stack_index_t stack_index, void *data)
Definition lst.c:662
unsigned int stack_index_t
Definition lst.c:51
fr_fast_rand_t rand_ctx
Seed for random choices.
Definition lst.c:66
unsigned int num_elements
Number of elements in the LST.
Definition lst.c:62
static void lst_move(fr_lst_t *lst, fr_lst_index_t location, void *data)
Move data to a specific location in an LST's array.
Definition lst.c:323
fr_lst_index_t * data
Array of indices of the pivots (sometimes called roots)
Definition lst.c:56
static fr_lst_index_t item_index(fr_lst_t const *lst, void *data)
Definition lst.c:95
static bool is_bucket(fr_lst_t const *lst, stack_index_t idx)
Definition lst.c:151
static void item_index_set(fr_lst_t *lst, void *data, fr_lst_index_t idx)
Definition lst.c:100
static fr_lst_index_t bucket_upb(fr_lst_t const *lst, stack_index_t stack_index)
Definition lst.c:450
static int stack_push(fr_lst_t *lst, pivot_stack_t *s, fr_lst_index_t pivot)
Definition lst.c:192
static fr_lst_index_t bucket_lwb(fr_lst_t const *lst, stack_index_t stack_index)
Definition lst.c:440
size_t offset
Offset of heap index in element structure.
Definition lst.c:63
fr_lst_t * _fr_lst_alloc(TALLOC_CTX *ctx, fr_cmp_t cmp, char const *type, size_t offset, fr_lst_index_t init)
Definition lst.c:221
static bool stack_expand(fr_lst_t *lst, pivot_stack_t *s)
Definition lst.c:156
void ** p
Array of elements.
Definition lst.c:64
fr_cmp_t cmp
Comparator function.
Definition lst.c:68
Definition lst.c:59
fr_lst_index_t fr_lst_iter_t
Definition lst.h:46
unsigned int fr_lst_index_t
Definition lst.h:44
static uint8_t fr_high_bit_pos(uint64_t num)
Find the highest order high bit in an unsigned 64 bit integer.
Definition math.h:94
unsigned char bool
unsigned char uint8_t
static uint8_t depth(fr_minmax_heap_index_t i)
Definition minmax_heap.c:83
int8_t(* fr_cmp_t)(void const *a, void const *b)
Definition misc.h:38
static bool cleanup
Definition radsniff.c:59
uint32_t fr_fast_rand(fr_fast_rand_t *ctx)
Definition rand.c:278
uint32_t fr_rand(void)
Return a 32-bit random number.
Definition rand.c:104
uint32_t b
Definition rand.h:55
uint32_t a
Definition rand.h:55
Smaller fast random number generator.
Definition rand.h:54
fr_aka_sim_id_type_t type
#define talloc_zero_pooled_object(_ctx, _type, _num_subobjects, _total_subobjects_size)
Definition talloc.h:201
#define fr_strerror_printf(_fmt,...)
Log to thread local error buffer.
Definition strerror.h:64
#define fr_strerror_const(_msg)
Definition strerror.h:223
static fr_slen_t data
Definition value.h:1340
int nonnull(2, 5))