24RCSIDH(slab_h,
"$Id: b40cbdd18e97992597f591d96e54799f1eac167d $")
30#include <freeradius-devel/util/dlist.h>
31#include <freeradius-devel/util/event.h>
/** Standard CONF_PARSER entries for tuning a slab allocator.
 *
 *  Maps the "min" / "max" / "cleanup_interval" config items onto the
 *  corresponding fr_slab_config_t fields, with defaults of 10 / 100 / 30s.
 *
 *  NOTE(review): the trailing '\' on the last visible line suggests further
 *  entries follow in the full file — confirm against the complete header.
 */
35#define FR_SLAB_CONFIG_CONF_PARSER \
36 { FR_CONF_OFFSET("min", fr_slab_config_t, min_elements), .dflt = "10" }, /* minimum elements kept allocated */ \
37 { FR_CONF_OFFSET("max", fr_slab_config_t, max_elements), .dflt = "100" }, /* maximum elements allocated via slabs */ \
38 { FR_CONF_OFFSET("cleanup_interval", fr_slab_config_t, interval), .dflt = "30s" }, /* period between cleanup timer firings */ \
/** Generate the type definitions for a slab allocator specialised to _type.
 *
 *  Expands to:
 *    - dlist types for slabs and slab elements,
 *    - callback typedefs (free / alloc / reserve),
 *    - _name ## _slab_list_t  - the top-level allocator state,
 *    - _name ## _slab_t       - one slab, holding reserved and available elements,
 *    - _name ## _slab_element_t - one element, castable to _type.
 *
 *  NOTE(review): this extract is missing interleaved lines (struct openers,
 *  some fields) dropped by extraction; comments describe visible members only.
 */
72#define FR_SLAB_TYPES(_name, _type) \
73 FR_DLIST_TYPES(_name ## _slab) \
74 FR_DLIST_TYPES(_name ## _slab_element) \
76 typedef int (*_type ## _slab_free_t)(_type *elem, void *uctx); /* called when an element is released or freed */ \
77 typedef int (*_type ## _slab_alloc_t)(_type *elem, void *uctx); /* called once when an element is first allocated */ \
78 typedef int (*_type ## _slab_reserve_t)(_type *elem, void *uctx); /* called each time an element is reserved */ \
81 FR_DLIST_HEAD(_name ## _slab) reserved; /* slabs with no available elements left */ \
82 FR_DLIST_HEAD(_name ## _slab) avail; /* slabs with at least one available element */ \
83 fr_event_list_t *el; \
84 fr_event_timer_t const *ev; /* periodic cleanup timer event */ \
85 fr_slab_config_t config; \
86 unsigned int in_use; /* elements currently reserved by callers */ \
87 unsigned int high_water_mark; /* peak allocated elements; reduced by cleanup */ \
88 _type ## _slab_alloc_t alloc; \
89 _type ## _slab_reserve_t reserve; \
93 } _name ## _slab_list_t; \
96 FR_DLIST_ENTRY(_name ## _slab) entry; /* membership in the list's avail/reserved slab dlists */ \
97 _name ## _slab_list_t *list; /* owning slab list */ \
99 FR_DLIST_HEAD(_name ## _slab_element) reserved; /* elements handed out from this slab */ \
100 FR_DLIST_HEAD(_name ## _slab_element) avail; /* elements free for reservation */ \
101 } _name ## _slab_t; \
105 FR_DLIST_ENTRY(_name ## _slab_element) entry; \
107 _name ## _slab_t *slab; /* owning slab; NULL if element was allocated outside a slab */ \
108 _type ## _slab_free_t free; /* per-element destructor callback, set via _slab_element_set_destructor */ \
110 } _name ## _slab_element_t;
/** Generate the functions for a _type slab allocator.
 *
 *  Provides: periodic cleanup of fully-idle slabs, list allocation,
 *  element reservation (growing the pool by whole slabs up to max_elements),
 *  element release (returning elements to their slab), and counters.
 *
 *  NOTE(review): this extract is missing interleaved lines (braces, else
 *  branches, loop headers) dropped by extraction; the comments below
 *  describe only the logic visible here.
 */
120#define FR_SLAB_FUNCS(_name, _type) \
121 FR_DLIST_FUNCS(_name ## _slab, _name ## _slab_t, entry) \
122 FR_DLIST_FUNCS(_name ## _slab_element, _name ## _slab_element_t, entry) \
124DIAG_OFF(unused-function) \
132 static void _ ## _name ## _slab_cleanup(fr_event_list_t *el, UNUSED fr_time_t now, void *uctx) /* timer callback: frees fully-idle slabs to shrink the pool */ \
134 _name ## _slab_list_t *slab_list = talloc_get_type_abort(uctx, _name ## _slab_list_t); \
135 _name ## _slab_t *slab = NULL, *next_slab = NULL; \
136 unsigned int to_clear, cleared = 0; \
137 to_clear = (slab_list->high_water_mark - slab_list->in_use) / 2; /* aim to clear half of the currently idle elements */ \
138 if ((slab_list->in_use + to_clear) < slab_list->config.min_elements) \
139 to_clear = slab_list->high_water_mark - slab_list->config.min_elements; /* ...but never shrink below min_elements */ \
140 if (to_clear < slab_list->config.elements_per_slab) goto finish; /* only whole slabs can be freed */ \
141 slab = _name ## _slab_head(&slab_list->avail); \
143 next_slab = _name ## _slab_next(&slab_list->avail, slab); /* capture next before removal */ \
144 if (_name ## _slab_element_num_elements(&slab->reserved) > 0) goto next; /* skip slabs with elements still in use */ \
145 _name ## _slab_remove(&slab_list->avail, slab); \
146 cleared += _name ## _slab_element_num_elements(&slab->avail); \
147 to_clear -= _name ## _slab_element_num_elements(&slab->avail); \
148 _name ## _slab_element_talloc_free(&slab->avail); /* frees all idle elements of this slab */ \
150 if (to_clear < slab_list->config.elements_per_slab) break; \
154 slab_list->high_water_mark -= cleared; \
156 (void) fr_event_timer_in(slab_list, el, &slab_list->ev, slab_list->config.interval, /* re-arm the periodic cleanup timer */ \
157 _ ## _name ## _slab_cleanup, slab_list); \
174 static inline _name ## _slab_list_t *_name ## _slab_list_alloc(TALLOC_CTX *ctx, /* allocate + initialise a slab list; starts the cleanup timer */ \
175 fr_event_list_t *el, \
176 fr_slab_config_t const *config, \
177 _type ## _slab_alloc_t alloc, \
178 _type ## _slab_reserve_t reserve, \
180 bool release_reset, \
183 _name ## _slab_list_t *slab; \
184 MEM(slab = talloc_zero(ctx, _name ## _slab_list_t)); \
186 slab->config = *config; /* copy so defaults can be fixed up locally */ \
187 if (slab->config.elements_per_slab == 0) { \
188 slab->config.elements_per_slab = (config->min_elements ? config->min_elements : 1); /* default: one slab holds min_elements (at least 1) */ \
190 slab->alloc = alloc; \
191 slab->reserve = reserve; \
193 slab->release_reset = release_reset; \
194 slab->reserve_mru = reserve_mru; \
195 _name ## _slab_init(&slab->reserved); \
196 _name ## _slab_init(&slab->avail); \
198 if (unlikely(fr_event_timer_in(slab, el, &slab->ev, config->interval, _ ## _name ## _slab_cleanup, slab) < 0)) { \
212 static int _ ## _type ## _element_free(_name ## _slab_element_t *element) /* talloc destructor: run user free cb, unlink from owning slab */ \
214 _name ## _slab_t *slab; \
215 if (element->in_use && element->free) element->free(( _type *)element, element->uctx); \
216 if (!element->slab) return 0; /* element was allocated outside any slab */ \
217 slab = element->slab; \
218 if (element->in_use) { \
219 _name ## _slab_element_remove(&slab->reserved, element); \
221 _name ## _slab_element_remove(&slab->avail, element); \
235 static inline CC_HINT(nonnull) _type *_name ## _slab_reserve(_name ## _slab_list_t *slab_list) /* reserve an element, growing by one slab if allowed */ \
237 _name ## _slab_t *slab; \
238 _name ## _slab_element_t *element = NULL; \
240 slab = slab_list->reserve_mru ? _name ## _slab_tail(&slab_list->avail) : /* MRU: reuse the most recently returned slab */ \
241 _name ## _slab_head(&slab_list->avail); \
242 if (!slab && ((_name ## _slab_num_elements(&slab_list->reserved) * /* no idle slab: allocate a new one unless at max_elements */ \
243 slab_list->config.elements_per_slab) < slab_list->config.max_elements)) { \
244 _name ## _slab_element_t *new_element; \
245 unsigned int count, elems; \
247 elems = slab_list->config.elements_per_slab * (1 + slab_list->config.num_children); /* pool object count includes expected child allocations */ \
248 elem_size = slab_list->config.elements_per_slab * (sizeof(_name ## _slab_element_t) + \
249 slab_list->config.child_pool_size); \
250 MEM(slab = talloc_zero_pooled_object(slab_list, _name ## _slab_t, elems, elem_size)); /* one pooled talloc chunk backs the whole slab */ \
251 _name ## _slab_element_init(&slab->avail); \
252 _name ## _slab_element_init(&slab->reserved); \
253 _name ## _slab_insert_head(&slab_list->avail, slab); \
254 slab->list = slab_list; \
255 for (count = 0; count < slab_list->config.elements_per_slab; count++) { \
256 if (slab_list->config.num_children > 0) { /* give each element its own pool for expected children */ \
257 MEM(new_element = talloc_zero_pooled_object(slab, _name ## _slab_element_t, \
258 slab_list->config.num_children, \
259 slab_list->config.child_pool_size)); \
261 MEM(new_element = talloc_zero(slab, _name ## _slab_element_t)); \
263 talloc_set_type(new_element, _type); /* elements are handed out as _type */ \
264 talloc_set_destructor(new_element, _ ## _type ## _element_free); \
265 _name ## _slab_element_insert_tail(&slab->avail, new_element); \
266 new_element->slab = slab; \
272 if (slab_list->alloc) { /* run the user alloc callback over each new element */ \
273 _name ## _slab_element_t *prev = NULL; \
274 new_element = NULL; \
275 while ((new_element = _name ## _slab_element_next(&slab->avail, new_element))) { \
276 if (slab_list->alloc((_type *)new_element, slab_list->uctx) < 0) { /* alloc cb failed: drop this element from the slab */ \
277 prev = _name ## _slab_element_remove(&slab->avail, new_element); \
278 talloc_free(new_element); \
279 new_element = prev; /* continue iterating from the element before the removed one */ \
284 slab_list->high_water_mark += _name ## _slab_element_num_elements(&slab->avail); \
286 if (!slab && slab_list->config.at_max_fail) return NULL; /* at capacity and configured to fail */ \
287 if (slab) element = slab_list->reserve_mru ? _name ## _slab_element_pop_tail(&slab->avail) : \
288 _name ## _slab_element_pop_head(&slab->avail); \
290 _name ## _slab_element_insert_tail(&slab->reserved, element); \
291 if (_name ## _slab_element_num_elements(&slab->avail) == 0) { /* slab fully reserved: move it to the reserved slab list */ \
292 _name ## _slab_remove(&slab_list->avail, slab); \
293 _name ## _slab_insert_tail(&slab_list->reserved, slab); \
295 element->in_use = true; \
296 slab_list->in_use++; \
298 MEM(element = talloc_zero(slab_list, _name ## _slab_element_t)); /* NOTE(review): fallback path — element allocated outside any slab; presumably taken when no slab could be used — confirm against full source */ \
299 talloc_set_type(element, _type); \
300 talloc_set_destructor(element, _ ## _type ## _element_free); \
301 if (slab_list->alloc) slab_list->alloc((_type *)element, slab_list->uctx); \
303 if (slab_list->reserve) slab_list->reserve((_type *)element, slab_list->uctx); /* per-reservation callback */ \
304 return (_type *)element; \
313 static inline CC_HINT(nonnull(1,2)) void _name ## _slab_element_set_destructor(_type *elem, _type ## _slab_free_t func, void *uctx) /* attach a per-element free callback and its uctx */ \
315 _name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
316 element->free = func; \
317 element->uctx = uctx; \
327 static inline CC_HINT(nonnull) void _name ## _slab_release(_type *elem) /* return a reserved element to its slab (or free a slab-less element) */ \
329 _name ## _slab_element_t *element = (_name ## _slab_element_t *)elem; \
330 _name ## _slab_t *slab = element->slab; \
331 if (element->free) element->free(elem, element->uctx); \
333 _name ## _slab_list_t *slab_list; \
334 slab_list = slab->list; \
335 _name ## _slab_element_remove(&slab->reserved, element); \
336 if (slab_list->release_reset){ /* scrub element state so it can be reused cleanly */ \
337 talloc_free_children(element); \
338 memset(&element->elem, 0, sizeof(_type)); \
339 element->free = NULL; \
340 element->uctx = NULL; \
342 _name ## _slab_element_insert_tail(&slab->avail, element); \
343 if (_name ## _slab_element_num_elements(&slab->avail) == 1) { /* slab has idle elements again: move back to avail list */ \
344 _name ## _slab_remove(&slab_list->reserved, slab); \
345 _name ## _slab_insert_tail(&slab_list->avail, slab); \
347 slab_list->in_use--; \
348 element->in_use = false; \
351 talloc_free(element); /* NOTE(review): appears to be the no-owning-slab branch — confirm the missing else structure in the full source */ \
354 static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_elements_used(_name ## _slab_list_t *slab_list) /* number of elements currently reserved */ \
356 return slab_list->in_use; \
359 static inline CC_HINT(nonnull) unsigned int _name ## _slab_num_allocated(_name ## _slab_list_t *slab_list) /* total slabs allocated (reserved + avail lists) */ \
361 return _name ## _slab_num_elements(&slab_list->reserved) + \
362 _name ## _slab_num_elements(&slab_list->avail); \
364DIAG_ON(unused-function)
fr_time_delta_t interval
Interval between slab cleanup events being fired.
unsigned int min_elements
Minimum number of elements to keep allocated.
unsigned int max_elements
Maximum number of elements to allocate using slabs.
bool at_max_fail
Whether requests for additional elements should fail once the number in use has reached max_elements.
unsigned int elements_per_slab
Number of elements to allocate per slab.
unsigned int num_children
How many child allocations are expected off each element.
size_t child_pool_size
Size of pool space to be allocated to each element.
Tuneable parameters for slabs.
A time delta, a difference in time measured in nanoseconds.