#pragma once
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

/** Resource pool management using slabs of memory
 *
 * @file src/lib/util/slab.h
 *
 * @copyright 2023 Network RADIUS SAS (legal@networkradius.com)
 */
RCSIDH(slab_h, "$Id: d2320874ef4975cf7ff6e4a020e56f347a6e6ad7 $")

#ifdef __cplusplus
extern "C" {
#endif

#include <freeradius-devel/util/dlist.h>
#include <freeradius-devel/util/event.h>
/** conf_parser_t entries to populate user configurable slab values
 */
#define FR_SLAB_CONFIG_CONF_PARSER \
    { FR_CONF_OFFSET("min", fr_slab_config_t, min_elements), .dflt = "10" }, \
    { FR_CONF_OFFSET("max", fr_slab_config_t, max_elements), .dflt = "100" }, \
    { FR_CONF_OFFSET("cleanup_interval", fr_slab_config_t, interval), .dflt = "30s" },

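/* A minimal usage sketch (the table name below is hypothetical): the macro
 * expands to three conf_parser_t entries whose offsets are relative to
 * fr_slab_config_t, so it can be spliced into a parser table for a section
 * which is parsed into that struct:
 *
 *	static conf_parser_t const slab_config[] = {
 *		FR_SLAB_CONFIG_CONF_PARSER
 *		CONF_PARSER_TERMINATOR
 *	};
 */
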
/** Tuneable parameters for slabs
 */
typedef struct {
    unsigned int	elements_per_slab;	//!< Number of elements to allocate per slab.
    unsigned int	min_elements;		//!< Minimum number of elements to keep allocated.
    unsigned int	max_elements;		//!< Maximum number of elements to allocate using slabs.
    bool		at_max_fail;		//!< Should requests for additional elements fail when the
						///< number in use has reached max_elements.
    unsigned int	num_children;		//!< How many child allocations are expected off each element.
    size_t		child_pool_size;	//!< Size of pool space to be allocated to each element.
    fr_time_delta_t	interval;		//!< Interval between slab cleanup events being fired.
} fr_slab_config_t;

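/* Where values do not come from a conf_parser_t, the config can be populated
 * directly.  A minimal sketch (the values are arbitrary examples):
 *
 *	fr_slab_config_t config = {
 *		.elements_per_slab = 10,
 *		.min_elements = 10,
 *		.max_elements = 100,
 *		.interval = fr_time_delta_from_sec(30)
 *	};
 */
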
/** Define type specific wrapper structs for slabs and slab elements
 *
 * @note This macro should be used inside the header for the area of code
 * which will use the type specific slabs.
 *
 * The top level structure is the slab_list.  Slab elements are reserved
 * from this list.  One or more slabs are created as children of the slab_list,
 * each of which consists of a number of elements.
 *
 * Within the slab_list there are two dlists which hold the slabs.  Slabs in
 * the `reserved` list have all their elements in use.  Slabs in the `avail`
 * list have elements available to reserve.
 *
 * Allocation of new elements can cause an allocation callback to be invoked
 * if elements need initialisation.
 *
 * An illustrative usage sketch follows the macro definition below.
 *
 * @param[in] _name	to use in type specific structures.
 * @param[in] _type	of structure which will be held in the slab elements.
 */
#define FR_SLAB_TYPES(_name, _type) \
    FR_DLIST_TYPES(_name ## _slab) \
    FR_DLIST_TYPES(_name ## _slab_element) \
\
    typedef int (*_type ## _slab_free_t)(_type *elem, void *uctx); \
    typedef int (*_type ## _slab_alloc_t)(_type *elem, void *uctx); \
    typedef int (*_type ## _slab_reserve_t)(_type *elem, void *uctx); \
\
    typedef struct { \
        FR_DLIST_HEAD(_name ## _slab)	reserved; \
        FR_DLIST_HEAD(_name ## _slab)	avail; \
        fr_event_list_t			*el; \
        fr_timer_t			*ev; \
        fr_slab_config_t		config; \
        unsigned int			in_use; \
        unsigned int			high_water_mark; \
        _type ## _slab_alloc_t		alloc; \
        _type ## _slab_reserve_t	reserve; \
        void				*uctx; \
        bool				release_reset; \
        bool				reserve_mru; \
    } _name ## _slab_list_t; \
\
    typedef struct { \
        FR_DLIST_ENTRY(_name ## _slab)		entry; \
        _name ## _slab_list_t			*list; \
        TALLOC_CTX				*pool; \
        FR_DLIST_HEAD(_name ## _slab_element)	reserved; \
        FR_DLIST_HEAD(_name ## _slab_element)	avail; \
    } _name ## _slab_t; \
\
    typedef struct { \
        _type					elem; \
        FR_DLIST_ENTRY(_name ## _slab_element)	entry; \
        bool					in_use; \
        _name ## _slab_t			*slab; \
        _type ## _slab_free_t			free; \
        void					*uctx; \
    } _name ## _slab_element_t;

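/* A minimal sketch of instantiating the types (illustrative only: my_conn
 * and my_conn_t are hypothetical names).  The macro emits
 * my_conn_slab_list_t, my_conn_slab_t and my_conn_slab_element_t, plus the
 * my_conn_t_slab_alloc_t / my_conn_t_slab_reserve_t / my_conn_t_slab_free_t
 * callback typedefs:
 *
 *	typedef struct {
 *		int	fd;
 *	} my_conn_t;
 *
 *	FR_SLAB_TYPES(my_conn, my_conn_t)
 */
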
/** Define type specific wrapper functions for slabs and slab elements
 *
 * @note This macro should be used inside the source file that will use
 * the type specific functions.
 *
 * An end-to-end usage sketch follows the macro definition below.
 *
 * @param[in] _name	used in type specific structures.
 * @param[in] _type	of structure returned by the reserve function.
 */
#define FR_SLAB_FUNCS(_name, _type) \
    FR_DLIST_FUNCS(_name ## _slab, _name ## _slab_t, entry) \
    FR_DLIST_FUNCS(_name ## _slab_element, _name ## _slab_element_t, entry) \
\
DIAG_OFF(unused-function) \
    /** Timer event for freeing unused slabs \
     * \
     * Called periodically to clear up slab allocations. \
     * Slabs in which no elements are in use will be freed, \
     * until up to half of the element count between the high water mark \
     * and the current number in use has been cleared. \
     */ \
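    /* Illustrative arithmetic (assumed values): with high_water_mark = 100, \
     * in_use = 40 and min_elements = 10, to_clear = (100 - 40) / 2 = 30. \
     * Since in_use + to_clear = 70 >= min_elements, no clamping applies, \
     * and whole slabs are freed until fewer than elements_per_slab \
     * elements remain to clear. */ \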
    static void _ ## _name ## _slab_cleanup(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) \
    { \
        _name ## _slab_list_t	*slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \
        _name ## _slab_t	*slab = NULL, *next_slab = NULL; \
        unsigned int		to_clear, cleared = 0; \
        to_clear = (slab_list->high_water_mark - slab_list->in_use) / 2; \
        if ((slab_list->in_use + to_clear) < slab_list->config.min_elements) \
            to_clear = slab_list->high_water_mark - slab_list->config.min_elements; \
        if (to_clear < slab_list->config.elements_per_slab) goto finish; \
        slab = _name ## _slab_head(&slab_list->avail); \
        while (slab) { \
            next_slab = _name ## _slab_next(&slab_list->avail, slab); \
            if (_name ## _slab_element_num_elements(&slab->reserved) > 0) goto next; \
            _name ## _slab_remove(&slab_list->avail, slab); \
            cleared += _name ## _slab_element_num_elements(&slab->avail); \
            to_clear -= _name ## _slab_element_num_elements(&slab->avail); \
            _name ## _slab_element_talloc_free(&slab->avail); \
            talloc_free(slab); \
            if (to_clear < slab_list->config.elements_per_slab) break; \
    next: \
            slab = next_slab; \
        } \
        slab_list->high_water_mark -= cleared; \
    finish: \
        (void) fr_timer_in(slab_list, tl, &slab_list->ev, slab_list->config.interval, false, \
                           _ ## _name ## _slab_cleanup, slab_list); \
    } \
\
    /** Allocate a slab list to manage slabs of allocated memory \
     * \
     * @param[in] ctx		in which to allocate the slab list. \
     * @param[in] el		Event list in which to run the cleanup function. \
     * @param[in] config		Slab config parameters. \
     * @param[in] alloc		Optional callback to use when allocating new elements. \
     * @param[in] reserve		Optional callback run when an element is reserved. \
     * @param[in] uctx		to pass to callbacks. \
     * @param[in] release_reset	On release, free talloc children and zero the element memory. \
     *				Use when elements may accumulate allocations that should not persist. \
     * @param[in] reserve_mru	If true, the most recently used element will be returned when an element is reserved. \
     * @return \
     *	- A new slab list. \
     *	- NULL on error. \
     */ \
    static inline _name ## _slab_list_t *_name ## _slab_list_alloc(TALLOC_CTX *ctx, \
                                                                   fr_event_list_t *el, \
                                                                   fr_slab_config_t const *config, \
                                                                   _type ## _slab_alloc_t alloc, \
                                                                   _type ## _slab_reserve_t reserve, \
                                                                   void *uctx, \
                                                                   bool release_reset, \
                                                                   bool reserve_mru) \
    { \
        _name ## _slab_list_t *slab; \
        MEM(slab = talloc_zero(ctx, _name ## _slab_list_t)); \
        slab->el = el; \
        slab->config = *config; \
        if (slab->config.elements_per_slab == 0) { \
            slab->config.elements_per_slab = (config->min_elements ? config->min_elements : 1); \
        } \
        slab->alloc = alloc; \
        slab->reserve = reserve; \
        slab->uctx = uctx; \
        slab->release_reset = release_reset; \
        slab->reserve_mru = reserve_mru; \
        _name ## _slab_init(&slab->reserved); \
        _name ## _slab_init(&slab->avail); \
        if (el) { \
            if (unlikely(fr_timer_in(slab, el->tl, &slab->ev, config->interval, false, _ ## _name ## _slab_cleanup, slab) < 0)) { \
                talloc_free(slab); \
                return NULL; \
            } \
        } \
        return slab; \
    } \
\
    /** Callback for talloc freeing a slab element \
     * \
     * Ensures that the element reset and any custom destructor are called, \
     * even if the element is not released before being freed. \
     * Also ensures the element is removed from the relevant list. \
     */ \
    static int _ ## _type ## _element_free(_name ## _slab_element_t *element) \
    { \
        _name ## _slab_t *slab; \
        if (element->in_use && element->free) element->free((_type *)element, element->uctx); \
        if (!element->slab) return 0; \
        slab = element->slab; \
        if (element->in_use) { \
            _name ## _slab_element_remove(&slab->reserved, element); \
        } else { \
            _name ## _slab_element_remove(&slab->avail, element); \
        } \
        return 0; \
    } \
\
    /** Reserve a slab element \
     * \
     * If there is no free slab element already allocated, a new slab \
     * will be allocated, until the max_elements limit of elements \
     * has been reached. \
     * \
     * @param[in] slab_list	to reserve an element from. \
     * @return a slab element. \
     */ \
    static inline CC_HINT(nonnull) _type *_name ## _slab_reserve(_name ## _slab_list_t *slab_list) \
    { \
        _name ## _slab_t		*slab; \
        _name ## _slab_element_t	*element = NULL; \
\
        slab = slab_list->reserve_mru ? _name ## _slab_tail(&slab_list->avail) : \
                                        _name ## _slab_head(&slab_list->avail); \
        if (!slab && ((_name ## _slab_num_elements(&slab_list->reserved) * \
                       slab_list->config.elements_per_slab) < slab_list->config.max_elements)) { \
            _name ## _slab_element_t *new_element; \
            unsigned int count, elems; \
            size_t elem_size; \
            elems = slab_list->config.elements_per_slab * (1 + slab_list->config.num_children); \
            elem_size = slab_list->config.elements_per_slab * (sizeof(_name ## _slab_element_t) + \
                                                               slab_list->config.child_pool_size); \
            MEM(slab = talloc_zero_pooled_object(slab_list, _name ## _slab_t, elems, elem_size)); \
            _name ## _slab_element_init(&slab->avail); \
            _name ## _slab_element_init(&slab->reserved); \
            _name ## _slab_insert_head(&slab_list->avail, slab); \
            slab->list = slab_list; \
            for (count = 0; count < slab_list->config.elements_per_slab; count++) { \
                if (slab_list->config.num_children > 0) { \
                    MEM(new_element = talloc_zero_pooled_object(slab, _name ## _slab_element_t, \
                                                                slab_list->config.num_children, \
                                                                slab_list->config.child_pool_size)); \
                } else { \
                    MEM(new_element = talloc_zero(slab, _name ## _slab_element_t)); \
                } \
                talloc_set_type(new_element, _type); \
                talloc_set_destructor(new_element, _ ## _type ## _element_free); \
                _name ## _slab_element_insert_tail(&slab->avail, new_element); \
                new_element->slab = slab; \
            } \
            /* Initialisation of new elements is done after allocation to ensure \
             * they are all allocated from the pool.  Without this, any talloc done \
             * by the callback may take memory from the pool. \
             * Any elements which fail to initialise are freed immediately. */ \
            if (slab_list->alloc) { \
                _name ## _slab_element_t *prev = NULL; \
                new_element = NULL; \
                while ((new_element = _name ## _slab_element_next(&slab->avail, new_element))) { \
                    if (slab_list->alloc((_type *)new_element, slab_list->uctx) < 0) { \
                        prev = _name ## _slab_element_remove(&slab->avail, new_element); \
                        talloc_free(new_element); \
                        new_element = prev; \
                        continue; \
                    } \
                } \
            } \
            slab_list->high_water_mark += _name ## _slab_element_num_elements(&slab->avail); \
        } \
        if (!slab && slab_list->config.at_max_fail) return NULL; \
        if (slab) element = slab_list->reserve_mru ? _name ## _slab_element_pop_tail(&slab->avail) : \
                                                     _name ## _slab_element_pop_head(&slab->avail); \
        if (element) { \
            _name ## _slab_element_insert_tail(&slab->reserved, element); \
            if (_name ## _slab_element_num_elements(&slab->avail) == 0) { \
                _name ## _slab_remove(&slab_list->avail, slab); \
                _name ## _slab_insert_tail(&slab_list->reserved, slab); \
            } \
            element->in_use = true; \
            slab_list->in_use++; \
        } else { \
            MEM(element = talloc_zero(slab_list, _name ## _slab_element_t)); \
            talloc_set_type(element, _type); \
            talloc_set_destructor(element, _ ## _type ## _element_free); \
            if (slab_list->alloc) slab_list->alloc((_type *)element, slab_list->uctx); \
        } \
        if (slab_list->reserve) slab_list->reserve((_type *)element, slab_list->uctx); \
        return (_type *)element; \
    } \
\
    /** Set a function to be called when a slab element is released \
     * \
     * @param[in] elem	Element for which the routine should be set. \
     * @param[in] func	Function to attach. \
     * @param[in] uctx	to be passed to func. \
     */ \
    static inline CC_HINT(nonnull(1,2)) void _name ## _slab_element_set_destructor(_type *elem, _type ## _slab_free_t func, void *uctx) \
    { \
        _name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
        element->free = func; \
        element->uctx = uctx; \
    } \
\
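    /* Illustrative use (hypothetical names): attach a per-reservation \
     * cleanup which runs on release, or when the element is talloc freed \
     * while still reserved: \
     * \
     *	conn = my_conn_slab_reserve(slabs); \
     *	my_conn_slab_element_set_destructor(conn, conn_cleanup, uctx); \
     */ \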
    /** Release a slab element \
     * \
     * If the element was allocated from a slab then it is returned \
     * to the list of available elements.  Otherwise it is talloc freed. \
     * \
     * @param[in] elem	to release. \
     */ \
    static inline CC_HINT(nonnull) void _name ## _slab_release(_type *elem) \
    { \
        _name ## _slab_element_t	*element = (_name ## _slab_element_t *)elem; \
        _name ## _slab_t		*slab = element->slab; \
        if (element->free) element->free(elem, element->uctx); \
        if (slab) { \
            _name ## _slab_list_t *slab_list; \
            slab_list = slab->list; \
            _name ## _slab_element_remove(&slab->reserved, element); \
            if (slab_list->release_reset) { \
                talloc_free_children(element); \
                memset(&element->elem, 0, sizeof(_type)); \
                element->free = NULL; \
                element->uctx = NULL; \
            } \
            _name ## _slab_element_insert_tail(&slab->avail, element); \
            if (_name ## _slab_element_num_elements(&slab->avail) == 1) { \
                _name ## _slab_remove(&slab_list->reserved, slab); \
                _name ## _slab_insert_tail(&slab_list->avail, slab); \
            } \
            slab_list->in_use--; \
            element->in_use = false; \
            return; \
        } \
        talloc_free(element); \
    } \
\
    static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_elements_used(_name ## _slab_list_t *slab_list) \
    { \
        return slab_list->in_use; \
    } \
\
    static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_allocated(_name ## _slab_list_t *slab_list) \
    { \
        return _name ## _slab_num_elements(&slab_list->reserved) + \
               _name ## _slab_num_elements(&slab_list->avail); \
    } \
DIAG_ON(unused-function)
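
/* An end-to-end sketch of the generated API (illustrative only: my_conn,
 * my_conn_t, conn_init, the literal config values and `el`, an existing
 * fr_event_list_t, are all hypothetical).  FR_SLAB_TYPES goes in a header,
 * FR_SLAB_FUNCS in the source file:
 *
 *	FR_SLAB_TYPES(my_conn, my_conn_t)
 *	FR_SLAB_FUNCS(my_conn, my_conn_t)
 *
 *	static int conn_init(my_conn_t *conn, UNUSED void *uctx)
 *	{
 *		conn->fd = -1;
 *		return 0;
 *	}
 *
 *	fr_slab_config_t	config = { .min_elements = 10, .max_elements = 100,
 *					   .interval = fr_time_delta_from_sec(30) };
 *	my_conn_slab_list_t	*slabs;
 *	my_conn_t		*conn;
 *
 *	slabs = my_conn_slab_list_alloc(NULL, el, &config, conn_init, NULL, NULL, true, false);
 *	conn = my_conn_slab_reserve(slabs);	// draws from a slab, allocating one if needed
 *	...
 *	my_conn_slab_release(conn);		// returns the element to the avail list
 */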

#ifdef __cplusplus
}
#endif