The FreeRADIUS server $Id: 15bac2a4c627c01d1aa2047687b3418955ac7f00 $
Loading...
Searching...
No Matches
slab.h
Go to the documentation of this file.
1#pragma once
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
16 */
17
18/** Resource pool management using slabs of memory
19 *
20 * @file src/lib/util/slab.h
21 *
22 * @copyright 2023 Network RADIUS SAS (legal@networkradius.com)
23 */
24RCSIDH(slab_h, "$Id: 0f2c2eb0f32433205fc0a3a884c6d3856d931398 $")
25
26#ifdef __cplusplus
27extern "C" {
28#endif
29
30#include <freeradius-devel/util/dlist.h>
31#include <freeradius-devel/util/event.h>
32
/** conf_parser_t entries to populate user configurable slab values
 *
 * Expands to three conf_parser_t array entries.  The trailing comma on the
 * last entry lets the user append further entries directly after the macro.
 *
 * NOTE: the final line deliberately has no '\' continuation - a trailing
 * continuation would silently pull whatever line follows the macro's use
 * of a blank separator into the definition.
 */
#define FR_SLAB_CONFIG_CONF_PARSER \
	{ FR_CONF_OFFSET("min", fr_slab_config_t, min_elements), .dflt = "10" }, \
	{ FR_CONF_OFFSET("max", fr_slab_config_t, max_elements), .dflt = "100" }, \
	{ FR_CONF_OFFSET("cleanup_interval", fr_slab_config_t, interval), .dflt = "30s" },
40/** Tuneable parameters for slabs
41 */
42typedef struct { \
43 unsigned int elements_per_slab; //!< Number of elements to allocate per slab.
44 unsigned int min_elements; //!< Minimum number of elements to keep allocated.
45 unsigned int max_elements; //!< Maximum number of elements to allocate using slabs.
46 bool at_max_fail; //!< Should requests for additional elements fail when the
47 ///< number in use has reached max_elements.
48 unsigned int num_children; //!< How many child allocations are expected off each element.
49 size_t child_pool_size; //!< Size of pool space to be allocated to each element.
50 bool allow_direct_free; //!< Allow in-use elements to be freed with talloc_free.
51 ///< Normally this is a programming error and triggers an
52 ///< assertion. Only set for testing the destructor path.
53 fr_time_delta_t interval; //!< Interval between slab cleanup events being fired.
55
/** Define type specific wrapper structs for slabs and slab elements
 *
 * @note This macro should be used inside the header for the area of code
 * which will use the type specific slabs.
 *
 * The top level structure is the slab_list.  Slab elements are reserved
 * from this list.  One or more slabs are created as children of the slab_list
 * each of which consist of a number of elements.
 *
 * Within the slab_list there are two dlists which hold the slabs.  Slabs in
 * the `reserved` list have all their elements in use.  Slabs in the `avail`
 * list have elements available to reserve.
 *
 * Allocation of new elements can cause an allocation callback to be invoked
 * if elements need initialisation.
 *
 * @param[in] _name	to use in type specific structures.
 * @param[in] _type	of structure which will be held in the slab elements.
 */
#define FR_SLAB_TYPES(_name, _type) \
	FR_DLIST_TYPES(_name ## _slab) \
	FR_DLIST_TYPES(_name ## _slab_element) \
\
	/* Callback types - each receives the element cast to _type and a user ctx */ \
	typedef int (*_type ## _slab_free_t)(_type *elem, void *uctx); \
	typedef int (*_type ## _slab_alloc_t)(_type *elem, void *uctx); \
	typedef int (*_type ## _slab_reserve_t)(_type *elem, void *uctx); \
\
	/* Top level slab list - owns the slabs and the periodic cleanup timer */ \
	typedef struct { \
		FR_DLIST_HEAD(_name ## _slab) reserved;		/* slabs with all elements in use */ \
		FR_DLIST_HEAD(_name ## _slab) avail;		/* slabs with elements free to reserve */ \
		fr_event_list_t *el;				/* event list the cleanup timer runs in */ \
		fr_timer_t *ev;					/* periodic cleanup timer event */ \
		fr_slab_config_t config;			/* tuneable parameters */ \
		unsigned int in_use;				/* elements currently reserved */ \
		unsigned int high_water_mark;			/* running count of elements allocated */ \
		_type ## _slab_alloc_t alloc;			/* optional element initialisation callback */ \
		_type ## _slab_reserve_t reserve;		/* optional callback run on each reserve */ \
		void *uctx;					/* passed to alloc / reserve callbacks */ \
		bool release_reset;				/* reset element memory on release */ \
		bool reserve_mru;				/* reserve most-recently-used element first */ \
	} _name ## _slab_list_t; \
\
	/* A single slab - a talloc pool holding elements_per_slab elements */ \
	typedef struct { \
		FR_DLIST_ENTRY(_name ## _slab) entry;		/* entry in the list's avail/reserved dlist */ \
		_name ## _slab_list_t *list;			/* owning slab list */ \
		TALLOC_CTX *pool; \
		bool being_freed;				/* set by destructor so element frees know it's bulk teardown */ \
		FR_DLIST_HEAD(_name ## _slab_element) reserved;	/* elements of this slab in use */ \
		FR_DLIST_HEAD(_name ## _slab_element) avail;	/* elements of this slab free to reserve */ \
	} _name ## _slab_t; \
\
	/* One element - _type must be first so element pointers cast to _type * */ \
	typedef struct { \
		_type elem;					/* the user-visible structure */ \
		FR_DLIST_ENTRY(_name ## _slab_element) entry;	/* entry in the slab's avail/reserved dlist */ \
		bool in_use; \
		_name ## _slab_t *slab;				/* owning slab, NULL for over-max standalone elements */ \
		_type ## _slab_free_t free;			/* optional callback run on release/free */ \
		void *uctx;					/* passed to the free callback */ \
	} _name ## _slab_element_t;
115
/** Define type specific wrapper functions for slabs and slab elements
 *
 * @note This macro should be used inside the source file that will use
 * the type specific functions.
 *
 * @param[in] _name	used in type specific structures.
 * @param[in] _type	of structure returned by the reserve function.
 */
#define FR_SLAB_FUNCS(_name, _type) \
	FR_DLIST_FUNCS(_name ## _slab, _name ## _slab_t, entry) \
	FR_DLIST_FUNCS(_name ## _slab_element, _name ## _slab_element_t, entry) \
\
DIAG_OFF(unused-function) \
	/** Timer event for freeing unused slabs \
	 * \
	 * Called periodically to clear up slab allocations. \
	 * Slabs with no elements in use will be freed, releasing \
	 * up to half of the element count between the high water mark \
	 * and the current number in use, never going below min_elements. \
	 */ \
	static void _ ## _name ## _slab_cleanup(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) \
	{ \
		_name ## _slab_list_t *slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \
		_name ## _slab_t *slab = NULL, *next_slab = NULL; \
		unsigned int to_clear, cleared = 0; \
		to_clear = (slab_list->high_water_mark - slab_list->in_use) / 2; \
		if ((slab_list->in_use + to_clear) < slab_list->config.min_elements) { \
			/* Guard the subtraction - if fewer than min_elements were ever \
			 * allocated the unsigned difference would wrap to a huge value \
			 * and every idle slab would be freed. */ \
			if (slab_list->high_water_mark <= slab_list->config.min_elements) goto finish; \
			to_clear = slab_list->high_water_mark - slab_list->config.min_elements; \
		} \
		if (to_clear < slab_list->config.elements_per_slab) goto finish; \
		slab = _name ## _slab_head(&slab_list->avail); \
		while (slab) { \
			next_slab = _name ## _slab_next(&slab_list->avail, slab); \
			/* Only slabs with no reserved elements can be freed */ \
			if (_name ## _slab_element_num_elements(&slab->reserved) > 0) goto next; \
			_name ## _slab_remove(&slab_list->avail, slab); \
			cleared += _name ## _slab_element_num_elements(&slab->avail); \
			to_clear -= _name ## _slab_element_num_elements(&slab->avail); \
			_name ## _slab_element_talloc_free(&slab->avail); \
			talloc_free(slab); \
			if (to_clear < slab_list->config.elements_per_slab) break; \
	next: \
			slab = next_slab; \
		} \
		slab_list->high_water_mark -= cleared; \
	finish: \
		(void) fr_timer_in(slab_list, tl, &slab_list->ev, slab_list->config.interval, false, \
				   _ ## _name ## _slab_cleanup, slab_list); \
	} \
\
	/** Allocate a slab list to manage slabs of allocated memory \
	 * \
	 * @param[in] ctx		in which to allocate the slab list. \
	 * @param[in] el		Event list in which to run clean up function. \
	 * @param[in] config		Slab config parameters. \
	 * @param[in] alloc		Optional callback to use when allocating new elements. \
	 * @param[in] reserve		Optional callback run on element reserving. \
	 * @param[in] uctx		to pass to callbacks. \
	 * @param[in] release_reset	On release: free talloc children, zero element memory. \
	 *				Use when elements may accumulate allocations that should not persist. \
	 * @param[in] reserve_mru	If true, the most recently used element will be returned when an element is reserved. \
	 * @return \
	 *	- A new slab list. \
	 *	- NULL on error. \
	 */ \
	static inline _name ## _slab_list_t *_name ## _slab_list_alloc(TALLOC_CTX *ctx, \
								       fr_event_list_t *el, \
								       fr_slab_config_t const *config, \
								       _type ## _slab_alloc_t alloc, \
								       _type ## _slab_reserve_t reserve, \
								       void *uctx, \
								       bool release_reset, \
								       bool reserve_mru) \
	{ \
		_name ## _slab_list_t *slab; \
		MEM(slab = talloc_zero(ctx, _name ## _slab_list_t)); \
		slab->el = el; \
		slab->config = *config; \
		/* Default elements_per_slab so the first slab can satisfy min_elements in one allocation */ \
		if (slab->config.elements_per_slab == 0) { \
			slab->config.elements_per_slab = (config->min_elements ? config->min_elements : 1); \
		} \
		slab->alloc = alloc; \
		slab->reserve = reserve; \
		slab->uctx = uctx; \
		slab->release_reset = release_reset; \
		slab->reserve_mru = reserve_mru; \
		_name ## _slab_init(&slab->reserved); \
		_name ## _slab_init(&slab->avail); \
		if (el) { \
			if (unlikely(fr_timer_in(slab, el->tl, &slab->ev, config->interval, false, _ ## _name ## _slab_cleanup, slab) < 0)) { \
				talloc_free(slab); \
				return NULL; \
			} \
		} \
		return slab; \
	} \
\
	/** Callback for talloc freeing a slab \
	 * \
	 * Sets the being_freed flag so that element destructors know this \
	 * is a bulk teardown and not an erroneous direct free. \
	 * talloc calls this destructor before freeing children. \
	 */ \
	static int _ ## _name ## _slab_free(_name ## _slab_t *slab) \
	{ \
		slab->being_freed = true; \
		return 0; \
	} \
\
	/** Callback for talloc freeing a slab element \
	 * \
	 * Called during bulk teardown (talloc_free of a slab or slab list). \
	 * Ensures the element is cleanly removed from whichever dlist it is on. \
	 * \
	 * In-use slab elements must NEVER be talloc_free'd directly.  Use \
	 * the type-specific _slab_release() function instead.  Calling \
	 * talloc_free permanently frees the element's pool memory, \
	 * progressively degrading slab efficiency.  The talloc pool cannot \
	 * be fully reclaimed until ALL children are freed, so piecemeal frees \
	 * fragment the pool and shrink the usable element count on each \
	 * reallocation cycle. \
	 */ \
	static int _ ## _type ## _element_free(_name ## _slab_element_t *element) \
	{ \
		_name ## _slab_t *slab; \
		if (element->in_use && element->free) element->free((_type *)element, element->uctx); \
		if (!element->slab) return 0;	/* standalone over-max element, nothing to unlink */ \
		slab = element->slab; \
		if (element->in_use) { \
			fr_assert_msg(slab->being_freed || slab->list->config.allow_direct_free, \
				      "in-use slab element freed with talloc_free - use _slab_release()"); \
			slab->list->in_use--; \
			element->in_use = false; \
			_name ## _slab_element_remove(&slab->reserved, element); \
		} else { \
			_name ## _slab_element_remove(&slab->avail, element); \
		} \
		return 0; \
	} \
\
	/** Reserve a slab element \
	 * \
	 * If there is not a free slab element already allocated, \
	 * a new slab will be allocated, until the max_elements limit \
	 * of elements have been created. \
	 * \
	 * @param[in] slab_list	to reserve an element from. \
	 * @return a slab element. \
	 */ \
	static inline CC_HINT(nonnull) _type *_name ## _slab_reserve(_name ## _slab_list_t *slab_list) \
	{ \
		_name ## _slab_t *slab; \
		_name ## _slab_element_t *element = NULL; \
\
		slab = slab_list->reserve_mru ? _name ## _slab_tail(&slab_list->avail) : \
						_name ## _slab_head(&slab_list->avail); \
		if (!slab && ((_name ## _slab_num_elements(&slab_list->reserved) * \
			       slab_list->config.elements_per_slab) < slab_list->config.max_elements)) { \
			_name ## _slab_element_t *new_element; \
			unsigned int count, elems; \
			size_t elem_size; \
			elems = slab_list->config.elements_per_slab * (1 + slab_list->config.num_children); \
			elem_size = slab_list->config.elements_per_slab * (sizeof(_name ## _slab_element_t) + \
									  slab_list->config.child_pool_size); \
			MEM(slab = talloc_zero_pooled_object(slab_list, _name ## _slab_t, elems, elem_size)); \
			talloc_set_destructor(slab, _ ## _name ## _slab_free); \
			_name ## _slab_element_init(&slab->avail); \
			_name ## _slab_element_init(&slab->reserved); \
			_name ## _slab_insert_head(&slab_list->avail, slab); \
			slab->list = slab_list; \
			for (count = 0; count < slab_list->config.elements_per_slab; count++) { \
				if (slab_list->config.num_children > 0) { \
					MEM(new_element = talloc_zero_pooled_object(slab, _name ## _slab_element_t, \
										    slab_list->config.num_children, \
										    slab_list->config.child_pool_size)); \
				} else { \
					MEM(new_element = talloc_zero(slab, _name ## _slab_element_t)); \
				} \
				talloc_set_type(new_element, _type); \
				talloc_set_destructor(new_element, _ ## _type ## _element_free); \
				_name ## _slab_element_insert_tail(&slab->avail, new_element); \
				new_element->slab = slab; \
			} \
			/* Initialisation of new elements done after allocation to ensure they \
			 * are all allocated from the pool.  Without this, any talloc done \
			 * by the callback may take memory from the pool. \
			 * Any elements which fail to initialise are freed immediately. */ \
			if (slab_list->alloc) { \
				_name ## _slab_element_t *prev = NULL; \
				new_element = NULL; \
				while ((new_element = _name ## _slab_element_next(&slab->avail, new_element))) { \
					if (slab_list->alloc((_type *)new_element, slab_list->uctx) < 0) { \
						prev = _name ## _slab_element_remove(&slab->avail, new_element); \
						talloc_free(new_element); \
						new_element = prev; \
						continue; \
					} \
				} \
			} \
			slab_list->high_water_mark += _name ## _slab_element_num_elements(&slab->avail); \
		} \
		if (!slab && slab_list->config.at_max_fail) return NULL; \
		if (slab) element = slab_list->reserve_mru ? _name ## _slab_element_pop_tail(&slab->avail) : \
							     _name ## _slab_element_pop_head(&slab->avail); \
		if (element) { \
			_name ## _slab_element_insert_tail(&slab->reserved, element); \
			/* A slab with no free elements moves to the reserved list */ \
			if (_name ## _slab_element_num_elements(&slab->avail) == 0) { \
				_name ## _slab_remove(&slab_list->avail, slab); \
				_name ## _slab_insert_tail(&slab_list->reserved, slab); \
			} \
			element->in_use = true; \
			slab_list->in_use++; \
		} else { \
			/* Over max_elements (and at_max_fail not set) - allocate a \
			 * standalone element, not tracked by any slab (element->slab \
			 * stays NULL so release will talloc_free it). */ \
			MEM(element = talloc_zero(slab_list, _name ## _slab_element_t)); \
			talloc_set_type(element, _type); \
			talloc_set_destructor(element, _ ## _type ## _element_free); \
			if (slab_list->alloc) slab_list->alloc((_type *)element, slab_list->uctx); \
		} \
		if (slab_list->reserve) slab_list->reserve((_type *)element, slab_list->uctx); \
		return (_type *)element; \
	} \
\
	/** Set a function to be called when a slab element is released \
	 * \
	 * @param[in] elem	Element for which the routine should be set. \
	 * @param[in] func	Function to attach. \
	 * @param[in] uctx	to be passed to func. \
	 */ \
	static inline CC_HINT(nonnull(1,2)) void _name ## _slab_element_set_destructor(_type *elem, _type ## _slab_free_t func, void *uctx) \
	{ \
		_name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
		element->free = func; \
		element->uctx = uctx; \
	} \
\
	/** Release a slab element \
	 * \
	 * If the element was allocated from a slab then it is returned \
	 * to the list of available elements.  Otherwise it is talloc freed. \
	 * \
	 * @param[in] elem	to release. \
	 */ \
	static inline CC_HINT(nonnull) void _name ## _slab_release(_type *elem) \
	{ \
		_name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
		_name ## _slab_t *slab = element->slab; \
		if (element->free) element->free(elem, element->uctx); \
		if (slab) { \
			_name ## _slab_list_t *slab_list; \
			slab_list = slab->list; \
			_name ## _slab_element_remove(&slab->reserved, element); \
			if (slab_list->release_reset) { \
				talloc_free_children(element); \
				memset(&element->elem, 0, sizeof(_type)); \
				element->free = NULL; \
				element->uctx = NULL; \
			} \
			_name ## _slab_element_insert_tail(&slab->avail, element); \
			/* First freed element makes this slab available again */ \
			if (_name ## _slab_element_num_elements(&slab->avail) == 1) { \
				_name ## _slab_remove(&slab_list->reserved, slab); \
				_name ## _slab_insert_tail(&slab_list->avail, slab); \
			} \
			slab_list->in_use--; \
			element->in_use = false; \
			return; \
		} \
		talloc_free(element); \
	} \
\
	/** Return the number of elements currently reserved */ \
	static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_elements_used(_name ## _slab_list_t *slab_list) \
	{ \
		return slab_list->in_use; \
	} \
\
	/** Return the number of slabs currently allocated (full plus available) */ \
	static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_allocated(_name ## _slab_list_t *slab_list) \
	{ \
		return _name ## _slab_num_elements(&slab_list->reserved) + \
		       _name ## _slab_num_elements(&slab_list->avail); \
	} \
DIAG_ON(unused-function)
394
395#ifdef __cplusplus
396}
397#endif
#define RCSIDH(h, id)
Definition build.h:488
fr_time_delta_t interval
Interval between slab cleanup events being fired.
Definition slab.h:53
unsigned int min_elements
Minimum number of elements to keep allocated.
Definition slab.h:44
unsigned int max_elements
Maximum number of elements to allocate using slabs.
Definition slab.h:45
bool at_max_fail
Should requests for additional elements fail when the number in use has reached max_elements.
Definition slab.h:46
unsigned int elements_per_slab
Number of elements to allocate per slab.
Definition slab.h:43
unsigned int num_children
How many child allocations are expected off each element.
Definition slab.h:48
size_t child_pool_size
Size of pool space to be allocated to each element.
Definition slab.h:49
bool allow_direct_free
Allow in-use elements to be freed with talloc_free.
Definition slab.h:50
Tuneable parameters for slabs.
Definition slab.h:42
A time delta, a difference in time measured in nanoseconds.
Definition time.h:80