slab.h
#pragma once
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

/** Resource pool management using slabs of memory
 *
 * @file src/lib/util/slab.h
 *
 * @copyright 2023 Network RADIUS SAS (legal@networkradius.com)
 */
RCSIDH(slab_h, "$Id: b40cbdd18e97992597f591d96e54799f1eac167d $")

#ifdef __cplusplus
extern "C" {
#endif

#include <freeradius-devel/util/dlist.h>
#include <freeradius-devel/util/event.h>

/** conf_parser_t entries to populate user configurable slab values
 */
#define FR_SLAB_CONFIG_CONF_PARSER \
	{ FR_CONF_OFFSET("min", fr_slab_config_t, min_elements), .dflt = "10" }, \
	{ FR_CONF_OFFSET("max", fr_slab_config_t, max_elements), .dflt = "100" }, \
	{ FR_CONF_OFFSET("cleanup_interval", fr_slab_config_t, interval), .dflt = "30s" }, \

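/** Example (illustrative, not part of the original header): embedding the
 * slab config entries in a conf_parser_t array. This sketch assumes the
 * enclosing configuration section is parsed with an fr_slab_config_t as its
 * base struct; the array name is hypothetical.
 *
 * @code
 * static const conf_parser_t slab_config[] = {
 *	FR_SLAB_CONFIG_CONF_PARSER
 *	CONF_PARSER_TERMINATOR
 * };
 * @endcode
 */
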
/** Tuneable parameters for slabs
 */
typedef struct {
	unsigned int	elements_per_slab;	//!< Number of elements to allocate per slab.
	unsigned int	min_elements;		//!< Minimum number of elements to keep allocated.
	unsigned int	max_elements;		//!< Maximum number of elements to allocate using slabs.
	bool		at_max_fail;		//!< Should requests for additional elements fail when the
						///< number in use has reached max_elements.
	unsigned int	num_children;		//!< How many child allocations are expected off each element.
	size_t		child_pool_size;	//!< Size of pool space to be allocated to each element.
	fr_time_delta_t	interval;		//!< Interval between slab cleanup events being fired.
} fr_slab_config_t;

/** Define type specific wrapper structs for slabs and slab elements
 *
 * @note This macro should be used inside the header for the area of code
 * which will use the type specific slabs.
 *
 * The top level structure is the slab_list. Slab elements are reserved
 * from this list. One or more slabs are created as children of the slab_list,
 * each of which consists of a number of elements.
 *
 * Within the slab_list there are two dlists which hold the slabs. Slabs in
 * the `reserved` list have all their elements in use. Slabs in the `avail`
 * list have elements available to reserve.
 *
 * Allocation of new elements can cause an allocation callback to be invoked
 * if elements need initialisation.
 *
 * @param[in] _name to use in type specific structures.
 * @param[in] _type of structure which will be held in the slab elements.
 */
#define FR_SLAB_TYPES(_name, _type) \
	FR_DLIST_TYPES(_name ## _slab) \
	FR_DLIST_TYPES(_name ## _slab_element) \
\
	typedef int (*_type ## _slab_free_t)(_type *elem, void *uctx); \
	typedef int (*_type ## _slab_alloc_t)(_type *elem, void *uctx); \
	typedef int (*_type ## _slab_reserve_t)(_type *elem, void *uctx); \
\
	typedef struct { \
		FR_DLIST_HEAD(_name ## _slab) reserved; \
		FR_DLIST_HEAD(_name ## _slab) avail; \
		fr_event_list_t *el; \
		fr_event_timer_t const *ev; \
		fr_slab_config_t config; \
		unsigned int in_use; \
		unsigned int high_water_mark; \
		_type ## _slab_alloc_t alloc; \
		_type ## _slab_reserve_t reserve; \
		void *uctx; \
		bool release_reset; \
		bool reserve_mru; \
	} _name ## _slab_list_t; \
\
	typedef struct { \
		FR_DLIST_ENTRY(_name ## _slab) entry; \
		_name ## _slab_list_t *list; \
		TALLOC_CTX *pool; \
		FR_DLIST_HEAD(_name ## _slab_element) reserved; \
		FR_DLIST_HEAD(_name ## _slab_element) avail; \
	} _name ## _slab_t; \
\
	typedef struct { \
		_type elem; \
		FR_DLIST_ENTRY(_name ## _slab_element) entry; \
		bool in_use; \
		_name ## _slab_t *slab; \
		_type ## _slab_free_t free; \
		void *uctx; \
	} _name ## _slab_element_t;
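
/** Example (illustrative, not part of the original header): declaring slab
 * types for a hypothetical connection_t. Both the type and the _name prefix
 * here are assumptions for the sake of the example.
 *
 * @code
 * typedef struct {
 *	int	fd;
 * } connection_t;
 *
 * FR_SLAB_TYPES(connection, connection_t)
 * @endcode
 *
 * This emits connection_slab_list_t, connection_slab_t and
 * connection_slab_element_t, plus the connection_t_slab_alloc_t,
 * connection_t_slab_reserve_t and connection_t_slab_free_t callback typedefs.
 */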

/** Define type specific wrapper functions for slabs and slab elements
 *
 * @note This macro should be used inside the source file that will use
 * the type specific functions.
 *
 * @param[in] _name used in type specific structures.
 * @param[in] _type of structure returned by the reserve function.
 */
#define FR_SLAB_FUNCS(_name, _type) \
	FR_DLIST_FUNCS(_name ## _slab, _name ## _slab_t, entry) \
	FR_DLIST_FUNCS(_name ## _slab_element, _name ## _slab_element_t, entry) \
\
DIAG_OFF(unused-function) \
	/** Timer event for freeing unused slabs \
	 * \
	 * Called periodically to clear up slab allocations. \
	 * Slabs in which no elements are in use will be freed, \
	 * up to half of the element count between the high water mark \
	 * and the current number in use. \
	 */ \
	static void _ ## _name ## _slab_cleanup(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) \
	{ \
		_name ## _slab_list_t *slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \
		_name ## _slab_t *slab = NULL, *next_slab = NULL; \
		unsigned int to_clear, cleared = 0; \
		to_clear = (slab_list->high_water_mark - slab_list->in_use) / 2; \
		if ((slab_list->in_use + to_clear) < slab_list->config.min_elements) \
			to_clear = slab_list->high_water_mark - slab_list->config.min_elements; \
		if (to_clear < slab_list->config.elements_per_slab) goto finish; \
		slab = _name ## _slab_head(&slab_list->avail); \
		while (slab) { \
			next_slab = _name ## _slab_next(&slab_list->avail, slab); \
			if (_name ## _slab_element_num_elements(&slab->reserved) > 0) goto next; \
			_name ## _slab_remove(&slab_list->avail, slab); \
			cleared += _name ## _slab_element_num_elements(&slab->avail); \
			to_clear -= _name ## _slab_element_num_elements(&slab->avail); \
			_name ## _slab_element_talloc_free(&slab->avail); \
			talloc_free(slab); \
			if (to_clear < slab_list->config.elements_per_slab) break; \
	next: \
			slab = next_slab; \
		} \
		slab_list->high_water_mark -= cleared; \
	finish: \
		(void) fr_event_timer_in(slab_list, el, &slab_list->ev, slab_list->config.interval, \
					 _ ## _name ## _slab_cleanup, slab_list); \
	} \
\
	/** Allocate a slab list to manage slabs of allocated memory \
	 * \
	 * @param[in] ctx in which to allocate the slab list. \
	 * @param[in] el Event list in which to run the cleanup function. \
	 * @param[in] config Slab config parameters. \
	 * @param[in] alloc Optional callback to use when allocating new elements. \
	 * @param[in] reserve Optional callback run when an element is reserved. \
	 * @param[in] uctx to pass to callbacks. \
	 * @param[in] release_reset Should elements be reset, and their children freed, when the element is released. \
	 * @param[in] reserve_mru If true, the most recently used element will be returned when an element is reserved. \
	 * @return \
	 *	- A new slab list. \
	 *	- NULL on error. \
	 */ \
	static inline _name ## _slab_list_t *_name ## _slab_list_alloc(TALLOC_CTX *ctx, \
								       fr_event_list_t *el, \
								       fr_slab_config_t const *config, \
								       _type ## _slab_alloc_t alloc, \
								       _type ## _slab_reserve_t reserve, \
								       void *uctx, \
								       bool release_reset, \
								       bool reserve_mru) \
	{ \
		_name ## _slab_list_t *slab; \
		MEM(slab = talloc_zero(ctx, _name ## _slab_list_t)); \
		slab->el = el; \
		slab->config = *config; \
		if (slab->config.elements_per_slab == 0) { \
			slab->config.elements_per_slab = (config->min_elements ? config->min_elements : 1); \
		} \
		slab->alloc = alloc; \
		slab->reserve = reserve; \
		slab->uctx = uctx; \
		slab->release_reset = release_reset; \
		slab->reserve_mru = reserve_mru; \
		_name ## _slab_init(&slab->reserved); \
		_name ## _slab_init(&slab->avail); \
		if (el) { \
			if (unlikely(fr_event_timer_in(slab, el, &slab->ev, config->interval, _ ## _name ## _slab_cleanup, slab) < 0)) { \
				talloc_free(slab); \
				return NULL; \
			} \
		} \
		return slab; \
	} \
\
	/** Callback for talloc freeing a slab element \
	 * \
	 * Ensures that the custom destructor is called, even if the element \
	 * is not released before being freed, and that the element is \
	 * removed from the relevant list. \
	 */ \
	static int _ ## _type ## _element_free(_name ## _slab_element_t *element) \
	{ \
		_name ## _slab_t *slab; \
		if (element->in_use && element->free) element->free((_type *)element, element->uctx); \
		if (!element->slab) return 0; \
		slab = element->slab; \
		if (element->in_use) { \
			_name ## _slab_element_remove(&slab->reserved, element); \
		} else { \
			_name ## _slab_element_remove(&slab->avail, element); \
		} \
		return 0; \
	} \
\
	/** Reserve a slab element \
	 * \
	 * If there is no free slab element already allocated, a new slab \
	 * will be allocated, until the max_elements limit of elements \
	 * has been reached. \
	 * \
	 * @param[in] slab_list to reserve an element from. \
	 * @return a slab element. \
	 */ \
	static inline CC_HINT(nonnull) _type *_name ## _slab_reserve(_name ## _slab_list_t *slab_list) \
	{ \
		_name ## _slab_t *slab; \
		_name ## _slab_element_t *element = NULL; \
\
		slab = slab_list->reserve_mru ? _name ## _slab_tail(&slab_list->avail) : \
						_name ## _slab_head(&slab_list->avail); \
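		/* Create a new slab only if the element limit has not been reached. \
		 * As no slab with free elements exists at this point, every \
		 * allocated slab is on the reserved list, so the reserved slab \
		 * count times elements_per_slab gives the elements allocated. */ \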
		if (!slab && ((_name ## _slab_num_elements(&slab_list->reserved) * \
			       slab_list->config.elements_per_slab) < slab_list->config.max_elements)) { \
			_name ## _slab_element_t *new_element; \
			unsigned int count, elems; \
			size_t elem_size; \
			elems = slab_list->config.elements_per_slab * (1 + slab_list->config.num_children); \
			elem_size = slab_list->config.elements_per_slab * (sizeof(_name ## _slab_element_t) + \
									   slab_list->config.child_pool_size); \
			MEM(slab = talloc_zero_pooled_object(slab_list, _name ## _slab_t, elems, elem_size)); \
			_name ## _slab_element_init(&slab->avail); \
			_name ## _slab_element_init(&slab->reserved); \
			_name ## _slab_insert_head(&slab_list->avail, slab); \
			slab->list = slab_list; \
			for (count = 0; count < slab_list->config.elements_per_slab; count++) { \
				if (slab_list->config.num_children > 0) { \
					MEM(new_element = talloc_zero_pooled_object(slab, _name ## _slab_element_t, \
										    slab_list->config.num_children, \
										    slab_list->config.child_pool_size)); \
				} else { \
					MEM(new_element = talloc_zero(slab, _name ## _slab_element_t)); \
				} \
				talloc_set_type(new_element, _type); \
				talloc_set_destructor(new_element, _ ## _type ## _element_free); \
				_name ## _slab_element_insert_tail(&slab->avail, new_element); \
				new_element->slab = slab; \
			} \
			/* Initialisation of new elements is done after allocation to ensure \
			 * they are all allocated from the pool. Without this, any talloc done \
			 * by the callback may take memory from the pool. \
			 * Any elements which fail to initialise are freed immediately. */ \
			if (slab_list->alloc) { \
				_name ## _slab_element_t *prev = NULL; \
				new_element = NULL; \
				while ((new_element = _name ## _slab_element_next(&slab->avail, new_element))) { \
					if (slab_list->alloc((_type *)new_element, slab_list->uctx) < 0) { \
						prev = _name ## _slab_element_remove(&slab->avail, new_element); \
						talloc_free(new_element); \
						new_element = prev; \
						continue; \
					} \
				} \
			} \
			slab_list->high_water_mark += _name ## _slab_element_num_elements(&slab->avail); \
		} \
		if (!slab && slab_list->config.at_max_fail) return NULL; \
		if (slab) element = slab_list->reserve_mru ? _name ## _slab_element_pop_tail(&slab->avail) : \
							     _name ## _slab_element_pop_head(&slab->avail); \
		if (element) { \
			_name ## _slab_element_insert_tail(&slab->reserved, element); \
			if (_name ## _slab_element_num_elements(&slab->avail) == 0) { \
				_name ## _slab_remove(&slab_list->avail, slab); \
				_name ## _slab_insert_tail(&slab_list->reserved, slab); \
			} \
			element->in_use = true; \
			slab_list->in_use++; \
		} else { \
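			/* No element was available from any slab (either the \
			 * max_elements limit was reached, or initialisation failed), \
			 * so fall back to a one-off allocation outside the slabs. */ \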
			MEM(element = talloc_zero(slab_list, _name ## _slab_element_t)); \
			talloc_set_type(element, _type); \
			talloc_set_destructor(element, _ ## _type ## _element_free); \
			if (slab_list->alloc) slab_list->alloc((_type *)element, slab_list->uctx); \
		} \
		if (slab_list->reserve) slab_list->reserve((_type *)element, slab_list->uctx); \
		return (_type *)element; \
	} \
\
	/** Set a function to be called when a slab element is released \
	 * \
	 * @param[in] elem Element for which the routine should be set. \
	 * @param[in] func Function to attach. \
	 * @param[in] uctx to be passed to func. \
	 */ \
	static inline CC_HINT(nonnull(1,2)) void _name ## _slab_element_set_destructor(_type *elem, _type ## _slab_free_t func, void *uctx) \
	{ \
		_name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
		element->free = func; \
		element->uctx = uctx; \
	} \
\
	/** Release a slab element \
	 * \
	 * If the element was allocated from a slab then it is returned \
	 * to the list of available elements. Otherwise it is talloc freed. \
	 * \
	 * @param[in] elem to release. \
	 */ \
	static inline CC_HINT(nonnull) void _name ## _slab_release(_type *elem) \
	{ \
		_name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
		_name ## _slab_t *slab = element->slab; \
		if (element->free) element->free(elem, element->uctx); \
		if (slab) { \
			_name ## _slab_list_t *slab_list; \
			slab_list = slab->list; \
			_name ## _slab_element_remove(&slab->reserved, element); \
			if (slab_list->release_reset) { \
				talloc_free_children(element); \
				memset(&element->elem, 0, sizeof(_type)); \
				element->free = NULL; \
				element->uctx = NULL; \
			} \
			_name ## _slab_element_insert_tail(&slab->avail, element); \
			if (_name ## _slab_element_num_elements(&slab->avail) == 1) { \
				_name ## _slab_remove(&slab_list->reserved, slab); \
				_name ## _slab_insert_tail(&slab_list->avail, slab); \
			} \
			slab_list->in_use--; \
			element->in_use = false; \
			return; \
		} \
		talloc_free(element); \
	} \
\
	/** Return the number of elements currently in use \
	 */ \
	static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_elements_used(_name ## _slab_list_t *slab_list) \
	{ \
		return slab_list->in_use; \
	} \
\
	/** Return the number of slabs currently allocated \
	 */ \
	static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_allocated(_name ## _slab_list_t *slab_list) \
	{ \
		return _name ## _slab_num_elements(&slab_list->reserved) + \
		       _name ## _slab_num_elements(&slab_list->avail); \
	} \
DIAG_ON(unused-function)
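
/** Example (illustrative, not part of the original header): a minimal
 * reserve/release cycle, assuming FR_SLAB_TYPES(connection, connection_t)
 * has been used in a header as sketched above. All names and config values
 * here are hypothetical.
 *
 * @code
 * FR_SLAB_FUNCS(connection, connection_t)
 *
 * static void example(TALLOC_CTX *ctx, fr_event_list_t *el)
 * {
 *	connection_slab_list_t *slab_list;
 *	connection_t *conn;
 *	fr_slab_config_t config = {
 *		.min_elements = 10,
 *		.max_elements = 100,
 *		.interval = fr_time_delta_from_sec(30)
 *	};
 *
 *	// No alloc/reserve callbacks, reset elements on release, FIFO reuse.
 *	slab_list = connection_slab_list_alloc(ctx, el, &config, NULL, NULL, NULL, true, false);
 *	if (!slab_list) return;
 *
 *	conn = connection_slab_reserve(slab_list);	// Allocates a slab on demand.
 *	if (conn) connection_slab_release(conn);	// Returns conn to the avail list.
 * }
 * @endcode
 */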

#ifdef __cplusplus
}
#endif