RCSIDH(slab_h,
       "$Id: d2320874ef4975cf7ff6e4a020e56f347a6e6ad7 $")

#include <freeradius-devel/util/dlist.h>
#include <freeradius-devel/util/event.h>
#define FR_SLAB_CONFIG_CONF_PARSER \
	{ FR_CONF_OFFSET("min", fr_slab_config_t, min_elements), .dflt = "10" }, \
	{ FR_CONF_OFFSET("max", fr_slab_config_t, max_elements), .dflt = "100" }, \
	{ FR_CONF_OFFSET("cleanup_interval", fr_slab_config_t, interval), .dflt = "30s" },
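For illustration, these entries are intended to be dropped into a configuration parser table whose parse target is an fr_slab_config_t.  A minimal sketch, assuming the conf_parser_t / CONF_PARSER_TERMINATOR conventions used elsewhere in FreeRADIUS (the table name here is hypothetical):

/* Sketch: parse the slab tunables directly into an fr_slab_config_t */
static conf_parser_t const slab_config[] = {
	FR_SLAB_CONFIG_CONF_PARSER
	CONF_PARSER_TERMINATOR
};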
#define FR_SLAB_TYPES(_name, _type) \
	FR_DLIST_TYPES(_name ## _slab) \
	FR_DLIST_TYPES(_name ## _slab_element) \
\
	typedef int (*_type ## _slab_free_t)(_type *elem, void *uctx); \
	typedef int (*_type ## _slab_alloc_t)(_type *elem, void *uctx); \
	typedef int (*_type ## _slab_reserve_t)(_type *elem, void *uctx); \
\
	typedef struct { \
		FR_DLIST_HEAD(_name ## _slab)	reserved;	/* Slabs whose elements are all reserved */ \
		FR_DLIST_HEAD(_name ## _slab)	avail;		/* Slabs with elements still available */ \
		fr_event_list_t			*el; \
		fr_timer_t			*ev;		/* Periodic cleanup timer event */ \
		fr_slab_config_t		config; \
		unsigned int			in_use;		/* Number of elements currently reserved */ \
		unsigned int			high_water_mark; \
		_type ## _slab_alloc_t		alloc;		/* Run once when an element is first allocated */ \
		_type ## _slab_reserve_t	reserve;	/* Run each time an element is reserved */ \
		void				*uctx;		/* User context passed to the callbacks */ \
		bool				release_reset;	/* Reset elements when they are released */ \
		bool				reserve_mru;	/* Reserve from the most recently used slab */ \
	} _name ## _slab_list_t; \
\
	typedef struct { \
		FR_DLIST_ENTRY(_name ## _slab)	entry; \
		_name ## _slab_list_t		*list; \
		FR_DLIST_HEAD(_name ## _slab_element)	reserved; \
		FR_DLIST_HEAD(_name ## _slab_element)	avail; \
	} _name ## _slab_t; \
\
	typedef struct { \
		_type				elem;		/* Must be first so elements can be cast to _type */ \
		FR_DLIST_ENTRY(_name ## _slab_element)	entry; \
		bool				in_use; \
		_name ## _slab_t		*slab; \
		_type ## _slab_free_t		free;		/* Destructor run on release / free */ \
		void				*uctx; \
	} _name ## _slab_element_t;
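To make the token pasting concrete, a hypothetical my_conn_t type (not part of this header) could declare its slab types as below.  The macro generates my_conn_slab_list_t, my_conn_slab_t and my_conn_slab_element_t, with each element embedding a my_conn_t as its first member so reserved elements can be used directly as my_conn_t pointers:

/* Sketch: declare slab types for a hypothetical element type */
typedef struct {
	int	fd;
} my_conn_t;

FR_SLAB_TYPES(my_conn, my_conn_t)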
#define FR_SLAB_FUNCS(_name, _type) \
	FR_DLIST_FUNCS(_name ## _slab, _name ## _slab_t, entry) \
	FR_DLIST_FUNCS(_name ## _slab_element, _name ## _slab_element_t, entry) \
\
DIAG_OFF(unused-function) \
	/* Periodic timer callback which frees wholly unused slabs, shrinking back towards min_elements */ \
	static void _ ## _name ## _slab_cleanup(fr_timer_list_t *tl, UNUSED fr_time_t now, void *uctx) \
	{ \
		_name ## _slab_list_t	*slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \
		_name ## _slab_t	*slab = NULL, *next_slab = NULL; \
		unsigned int		to_clear, cleared = 0; \
		to_clear = (slab_list->high_water_mark - slab_list->in_use) / 2; \
		if ((slab_list->in_use + to_clear) < slab_list->config.min_elements) \
			to_clear = slab_list->high_water_mark - slab_list->config.min_elements; \
		if (to_clear < slab_list->config.elements_per_slab) goto finish; \
		slab = _name ## _slab_head(&slab_list->avail); \
		while (slab) { \
			next_slab = _name ## _slab_next(&slab_list->avail, slab); \
			if (_name ## _slab_element_num_elements(&slab->reserved) > 0) goto next; \
			_name ## _slab_remove(&slab_list->avail, slab); \
			cleared += _name ## _slab_element_num_elements(&slab->avail); \
			to_clear -= _name ## _slab_element_num_elements(&slab->avail); \
			_name ## _slab_element_talloc_free(&slab->avail); \
			talloc_free(slab); \
			if (to_clear < slab_list->config.elements_per_slab) break; \
		next: \
			slab = next_slab; \
		} \
		slab_list->high_water_mark -= cleared; \
	finish: \
		(void) fr_timer_in(slab_list, tl, &slab_list->ev, slab_list->config.interval, false, \
				   _ ## _name ## _slab_cleanup, slab_list); \
	} \
	/* Allocate a slab list to manage slabs of elements and schedule the cleanup timer */ \
	static inline _name ## _slab_list_t *_name ## _slab_list_alloc(TALLOC_CTX *ctx, \
									fr_event_list_t *el, \
									fr_slab_config_t const *config, \
									_type ## _slab_alloc_t alloc, \
									_type ## _slab_reserve_t reserve, \
									void *uctx, \
									bool release_reset, \
									bool reserve_mru) \
	{ \
		_name ## _slab_list_t	*slab; \
		MEM(slab = talloc_zero(ctx, _name ## _slab_list_t)); \
		slab->el = el; \
		slab->config = *config; \
		if (slab->config.elements_per_slab == 0) { \
			slab->config.elements_per_slab = (config->min_elements ? config->min_elements : 1); \
		} \
		slab->alloc = alloc; \
		slab->reserve = reserve; \
		slab->uctx = uctx; \
		slab->release_reset = release_reset; \
		slab->reserve_mru = reserve_mru; \
		_name ## _slab_init(&slab->reserved); \
		_name ## _slab_init(&slab->avail); \
		if (unlikely(fr_timer_in(slab, el->tl, &slab->ev, config->interval, false, _ ## _name ## _slab_cleanup, slab) < 0)) { \
			talloc_free(slab); \
			return NULL; \
		} \
		return slab; \
	} \
	/* Talloc destructor for elements, removing them from their slab's lists before they are freed */ \
	static int _ ## _type ## _element_free(_name ## _slab_element_t *element) \
	{ \
		_name ## _slab_t	*slab; \
		if (element->in_use && element->free) element->free((_type *)element, element->uctx); \
		if (!element->slab) return 0; \
		slab = element->slab; \
		if (element->in_use) { \
			_name ## _slab_element_remove(&slab->reserved, element); \
		} else { \
			_name ## _slab_element_remove(&slab->avail, element); \
		} \
		return 0; \
	} \
	/* Reserve an element, allocating a new slab of elements if none are available */ \
	static inline CC_HINT(nonnull) _type *_name ## _slab_reserve(_name ## _slab_list_t *slab_list) \
	{ \
		_name ## _slab_t	*slab; \
		_name ## _slab_element_t *element = NULL; \
\
		slab = slab_list->reserve_mru ? _name ## _slab_tail(&slab_list->avail) : \
						_name ## _slab_head(&slab_list->avail); \
		if (!slab && ((_name ## _slab_num_elements(&slab_list->reserved) * \
			       slab_list->config.elements_per_slab) < slab_list->config.max_elements)) { \
			_name ## _slab_element_t *new_element; \
			unsigned int	count, elems; \
			size_t		elem_size; \
			elems = slab_list->config.elements_per_slab * (1 + slab_list->config.num_children); \
			elem_size = slab_list->config.elements_per_slab * (sizeof(_name ## _slab_element_t) + \
									   slab_list->config.child_pool_size); \
			MEM(slab = talloc_zero_pooled_object(slab_list, _name ## _slab_t, elems, elem_size)); \
			_name ## _slab_element_init(&slab->avail); \
			_name ## _slab_element_init(&slab->reserved); \
			_name ## _slab_insert_head(&slab_list->avail, slab); \
			slab->list = slab_list; \
			for (count = 0; count < slab_list->config.elements_per_slab; count++) { \
				if (slab_list->config.num_children > 0) { \
					MEM(new_element = talloc_zero_pooled_object(slab, _name ## _slab_element_t, \
										    slab_list->config.num_children, \
										    slab_list->config.child_pool_size)); \
				} else { \
					MEM(new_element = talloc_zero(slab, _name ## _slab_element_t)); \
				} \
				talloc_set_type(new_element, _type); \
				talloc_set_destructor(new_element, _ ## _type ## _element_free); \
				_name ## _slab_element_insert_tail(&slab->avail, new_element); \
				new_element->slab = slab; \
			} \
			/* Run the optional alloc callback on each new element, discarding any which fail */ \
			if (slab_list->alloc) { \
				_name ## _slab_element_t *prev = NULL; \
				new_element = NULL; \
				while ((new_element = _name ## _slab_element_next(&slab->avail, new_element))) { \
					if (slab_list->alloc((_type *)new_element, slab_list->uctx) < 0) { \
						prev = _name ## _slab_element_remove(&slab->avail, new_element); \
						talloc_free(new_element); \
						new_element = prev; \
					} \
				} \
			} \
			slab_list->high_water_mark += _name ## _slab_element_num_elements(&slab->avail); \
		} \
		if (!slab && slab_list->config.at_max_fail) return NULL; \
		if (slab) element = slab_list->reserve_mru ? _name ## _slab_element_pop_tail(&slab->avail) : \
							     _name ## _slab_element_pop_head(&slab->avail); \
		if (element) { \
			_name ## _slab_element_insert_tail(&slab->reserved, element); \
			if (_name ## _slab_element_num_elements(&slab->avail) == 0) { \
				_name ## _slab_remove(&slab_list->avail, slab); \
				_name ## _slab_insert_tail(&slab_list->reserved, slab); \
			} \
			element->in_use = true; \
			slab_list->in_use++; \
		} else { \
			/* At max_elements with at_max_fail unset - fall back to a standalone element */ \
			MEM(element = talloc_zero(slab_list, _name ## _slab_element_t)); \
			talloc_set_type(element, _type); \
			talloc_set_destructor(element, _ ## _type ## _element_free); \
			if (slab_list->alloc) slab_list->alloc((_type *)element, slab_list->uctx); \
		} \
		if (slab_list->reserve) slab_list->reserve((_type *)element, slab_list->uctx); \
		return (_type *)element; \
	} \
	/* Set a function to be called when an element is released or freed */ \
	static inline CC_HINT(nonnull(1,2)) void _name ## _slab_element_set_destructor(_type *elem, _type ## _slab_free_t func, void *uctx) \
	{ \
		_name ## _slab_element_t	*element = (_name ## _slab_element_t *)elem; \
		element->free = func; \
		element->uctx = uctx; \
	} \
	/* Release a reserved element, returning it to its slab's available list */ \
	static inline CC_HINT(nonnull) void _name ## _slab_release(_type *elem) \
	{ \
		_name ## _slab_element_t	*element = (_name ## _slab_element_t *)elem; \
		_name ## _slab_t		*slab = element->slab; \
		if (element->free) element->free(elem, element->uctx); \
		if (slab) { \
			_name ## _slab_list_t	*slab_list; \
			slab_list = slab->list; \
			_name ## _slab_element_remove(&slab->reserved, element); \
			if (slab_list->release_reset) { \
				talloc_free_children(element); \
				memset(&element->elem, 0, sizeof(_type)); \
				element->free = NULL; \
				element->uctx = NULL; \
			} \
			_name ## _slab_element_insert_tail(&slab->avail, element); \
			if (_name ## _slab_element_num_elements(&slab->avail) == 1) { \
				_name ## _slab_remove(&slab_list->reserved, slab); \
				_name ## _slab_insert_tail(&slab_list->avail, slab); \
			} \
			slab_list->in_use--; \
			element->in_use = false; \
			return; \
		} \
		/* Standalone elements (allocated past max_elements) are simply freed */ \
		talloc_free(element); \
	} \
	static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_elements_used(_name ## _slab_list_t *slab_list) \
	{ \
		return slab_list->in_use; \
	} \
\
	static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_allocated(_name ## _slab_list_t *slab_list) \
	{ \
		return _name ## _slab_num_elements(&slab_list->reserved) + \
		       _name ## _slab_num_elements(&slab_list->avail); \
	} \
DIAG_ON(unused-function)
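Continuing the hypothetical my_conn_t example from above, a minimal usage sketch of the generated functions follows.  The event list, callbacks and argument order match the _slab_list_alloc reconstruction shown here and are illustrative assumptions, not part of this header:

/* Sketch: generate the slab functions for my_conn_t and exercise reserve/release */
FR_SLAB_FUNCS(my_conn, my_conn_t)

static int my_conn_init(my_conn_t *conn, UNUSED void *uctx)
{
	conn->fd = -1;			/* one-time setup when the element is first carved from a slab */
	return 0;
}

static void example(TALLOC_CTX *ctx, fr_event_list_t *el, fr_slab_config_t const *config)
{
	my_conn_slab_list_t	*slab_list;
	my_conn_t		*conn;

	slab_list = my_conn_slab_list_alloc(ctx, el, config, my_conn_init, NULL, NULL, true, false);
	conn = my_conn_slab_reserve(slab_list);		/* grows the slab list if nothing is available */
	/* ... use conn ... */
	my_conn_slab_release(conn);			/* returns the element to its slab for reuse */
}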
Tuneable parameters for slabs (fr_slab_config_t):

fr_time_delta_t interval
	Interval between slab cleanup events being fired (a time delta, measured in nanoseconds).
unsigned int min_elements
	Minimum number of elements to keep allocated.
unsigned int max_elements
	Maximum number of elements to allocate using slabs.
bool at_max_fail
	Should requests for additional elements fail when the number in use has reached max_elements?
unsigned int elements_per_slab
	Number of elements to allocate per slab.
unsigned int num_children
	How many child allocations are expected off each element.
size_t child_pool_size
	Size of pool space to be allocated to each element.
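As a sketch, a config matching the defaults above could also be built directly in code rather than parsed from a configuration file; fr_time_delta_from_sec() is assumed here as the usual way to construct the interval:

/* Sketch: fill in fr_slab_config_t by hand (values mirror the documented defaults) */
fr_slab_config_t config = {
	.elements_per_slab	= 10,
	.min_elements		= 10,			/* "min" default */
	.max_elements		= 100,			/* "max" default */
	.at_max_fail		= false,
	.num_children		= 0,
	.child_pool_size	= 0,
	.interval		= fr_time_delta_from_sec(30)	/* "cleanup_interval" default */
};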